repo_name (stringlengths 6-103) | path (stringlengths 5-191) | copies (stringlengths 1-4) | size (stringlengths 4-6) | content (stringlengths 986-970k) | license (stringclasses, 15 values)
---|---|---|---|---|---
vdumoulin/fuel | fuel/transformers/sequences.py | 7 | 4963 | from fuel.transformers import Transformer
class Window(Transformer):
"""Return pairs of source and target windows from a stream.
This data stream wrapper takes as an input a data stream outputting
sequences of potentially varying lengths (e.g. sentences, audio tracks,
etc.). It then returns two sliding windows (source and target) over
these sequences.
For example, to train an n-gram model set `source_window` to n,
`target_window` to 1, no offset, and `overlapping` to false. This will
give chunks [1, N] and [N + 1]. To train an RNN you often want to set
the source and target window to the same size and use an offset of 1
with overlap, this would give you chunks [1, N] and [2, N + 1].
Parameters
----------
offset : int
The offset from the source window where the target window starts.
source_window : int
The size of the source window.
target_window : int
The size of the target window.
overlapping : bool
If true, the source and target windows overlap i.e. the offset of
the target window is taken to be from the beginning of the source
window. If false, the target window offset is taken to be from the
end of the source window.
data_stream : :class:`.DataStream` instance
The data stream providing sequences. Each example is assumed to be
an object that supports slicing.
target_source : str, optional
This data stream adds a new source for the target words. By default
this source is 'targets'.
"""
def __init__(self, offset, source_window, target_window,
overlapping, data_stream, target_source='targets', **kwargs):
if not data_stream.produces_examples:
raise ValueError('the wrapped data stream must produce examples, '
'not batches of examples.')
if len(data_stream.sources) > 1:
raise ValueError('{} expects only one source'
.format(self.__class__.__name__))
super(Window, self).__init__(data_stream, produces_examples=True,
**kwargs)
self.sources = self.sources + (target_source,)
self.offset = offset
self.source_window = source_window
self.target_window = target_window
self.overlapping = overlapping
self.sentence = []
self._set_index()
def _set_index(self):
"""Set the starting index of the source window."""
self.index = 0
# If offset is negative, target window might start before 0
self.index = -min(0, self._get_target_index())
def _get_target_index(self):
"""Return the index where the target window starts."""
return (self.index + self.source_window * (not self.overlapping) +
self.offset)
def _get_end_index(self):
"""Return the end of both windows."""
return max(self.index + self.source_window,
self._get_target_index() + self.target_window)
def get_data(self, request=None):
if request is not None:
raise ValueError
while not self._get_end_index() <= len(self.sentence):
self.sentence, = next(self.child_epoch_iterator)
self._set_index()
source = self.sentence[self.index:self.index + self.source_window]
target = self.sentence[self._get_target_index():
self._get_target_index() + self.target_window]
self.index += 1
return (source, target)
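# Editor's note: a minimal standalone sketch (not part of fuel) that mirrors the
# index arithmetic of _get_target_index/_get_end_index above, to illustrate the
# two use cases from the class docstring. It assumes a non-negative offset.
def _sliding_windows(sentence, offset, source_window, target_window, overlapping):
    index = 0
    while True:
        target_index = index + source_window * (not overlapping) + offset
        end_index = max(index + source_window, target_index + target_window)
        if end_index > len(sentence):
            break
        yield (sentence[index:index + source_window],
               sentence[target_index:target_index + target_window])
        index += 1
# >>> next(_sliding_windows([1, 2, 3, 4, 5], 0, 3, 1, False))
# ([1, 2, 3], [4])          # n-gram style: chunks [1, N] and [N + 1]
# >>> next(_sliding_windows([1, 2, 3, 4, 5], 1, 3, 3, True))
# ([1, 2, 3], [2, 3, 4])    # RNN style: chunks [1, N] and [2, N + 1]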
class NGrams(Window):
"""Return n-grams from a stream.
This data stream wrapper takes as an input a data stream outputting
sentences. From these sentences n-grams of a fixed order (e.g. bigrams,
trigrams, etc.) are extracted and returned. It also creates a
``targets`` data source. For each example, the target is the word
immediately following that n-gram. It is normally used for language
modeling, where we try to predict the next word from the previous *n*
words.
.. note::
Unlike the :class:`Window` stream, the target returned by
:class:`NGrams` is a single element instead of a window.
Parameters
----------
ngram_order : int
The order of the n-grams to output e.g. 3 for trigrams.
data_stream : :class:`.DataStream` instance
The data stream providing sentences. Each example is assumed to be
a list of integers.
target_source : str, optional
This data stream adds a new source for the target words. By default
this source is 'targets'.
"""
def __init__(self, ngram_order, *args, **kwargs):
super(NGrams, self).__init__(
0, ngram_order, 1, False, *args, **kwargs)
def get_data(self, *args, **kwargs):
source, target = super(NGrams, self).get_data(*args, **kwargs)
return (source, target[0])
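# Editor's sketch of typical usage (not from the original file). It assumes
# fuel's IterableDataset / DataStream API for building a single-source stream
# of examples; the toy sentences are purely illustrative.
def _example_ngram_stream():
    from fuel.datasets import IterableDataset
    from fuel.streams import DataStream
    sentences = [[1, 2, 3, 4, 5], [6, 7, 8, 9]]
    stream = DataStream(IterableDataset(sentences))  # one source, example-wise
    trigram_stream = NGrams(3, stream)               # trigrams plus the next word
    epoch = trigram_stream.get_epoch_iterator()
    return next(epoch)  # expected: ([1, 2, 3], 4)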
| mit |
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/utilities/tune.py | 1 | 9729 | """
Tunes a set of models against a training set to identify the best
model/configuration.
Usage:
tune <params-config> <features> <label>
[--observations=<path>]
[--scoring=<type>]
[--test-prop=<prop>]
[--folds=<num>]
[--report=<path>]
[--label-type=<type>]
[--processes=<num>]
[--cv-timeout=<mins>]
[--scale-features]
[--verbose] [--debug]
Options:
<params-config> The path to a YAML configuration file containing the
models and parameter values to search when tuning
<features> The classpath to a feature_list to use when
interpreting the feature values of the observations
<label> The name of the field to be predicted
--observations=<path> The path to a file containing observations to train
and test against. [default: <stdin>]
--scoring=<type> The type of scoring strategy to optimize for when
choosing parameter sets [default: roc_auc]
--folds=<num> The number of cross-validation folds to try
[default: 5]
--report=<path> Path to a file to write the tuning report to
[default: <stdout>]
--processes=<num> The number of parallel processes to start for
model building [default: <cpu-count>]
--cv-timeout=<mins> The number of minutes to wait for a model to
cross-validate before timing out
[default: <forever>]
--scale-features Scales the feature values before tuning
--verbose Print progress information to stderr
--debug Print debug information to stderr
"""
import datetime
import json
import logging
import multiprocessing
import sys
import time
import traceback
from collections import defaultdict
import docopt
import numpy
import yamlconf
from sklearn import cross_validation, grid_search, preprocessing
from tabulate import tabulate
from . import metrics
from .. import __version__
from ..dependencies import solve
from .util import Timeout, read_observations
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
params_config = yamlconf.load(open(args['<params-config>']))
features_path = args['<features>']
features = yamlconf.import_path(features_path)
if args['--observations'] == "<stdin>":
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--observations']))
logger.info("Reading feature values & labels...")
label_name = args['<label>']
value_labels = \
[(list(solve(features, cache=ob['cache'])), ob[label_name])
for ob in observations]
# Get a specialized scorer if we have one
scoring = metrics.SCORERS.get(args['--scoring'], args['--scoring'])
folds = int(args['--folds'])
if args['--report'] == "<stdout>":
report = sys.stdout
else:
report = open(args['--report'], "w")
if args['--processes'] == "<cpu-count>":
processes = multiprocessing.cpu_count()
else:
processes = int(args['--processes'])
if args['--cv-timeout'] == "<forever>":
cv_timeout = None
else:
cv_timeout = float(args['--cv-timeout']) * 60 # Convert to seconds
scale_features = args['--scale-features']
verbose = args['--verbose']
run(params_config, features_path, value_labels, scoring, folds,
report, processes, cv_timeout, scale_features, verbose)
def run(params_config, features_path, value_labels, scoring, folds,
report, processes, cv_timeout, scale_features, verbose):
if scale_features:
logger.debug("Scaling features...")
ss = preprocessing.StandardScaler()
feature_values, labels = (list(vect) for vect in zip(*value_labels))
scaled_feature_values = ss.fit_transform(feature_values)
value_labels = list(zip(scaled_feature_values, labels))
# Prepare the worker pool
logger.debug("Starting up multiprocessing pool (processes={0})"
.format(processes))
pool = multiprocessing.Pool(processes=processes)
# Start writing the model tuning report
possible_labels = set(label for _, label in value_labels)
report.write("# Model tuning report\n")
report.write("- Revscoring version: {0}\n".format(__version__))
report.write("- Features: {0}\n".format(features_path))
report.write("- Date: {0}\n".format(datetime.datetime.now().isoformat()))
report.write("- Observations: {0}\n".format(len(value_labels)))
report.write("- Labels: {0}\n".format(json.dumps(list(possible_labels))))
report.write("- Scoring: {0}\n".format(scoring))
report.write("- Folds: {0}\n".format(folds))
report.write("\n")
# For each estimator and paramset, submit the job.
cv_result_sets = defaultdict(lambda: [])
for name, estimator, param_grid in _estimator_param_grid(params_config):
logger.debug("Submitting jobs for {0}:".format(name))
for params in param_grid:
logger.debug("\tsubmitting {0}..."
.format(format_params(params)))
result = pool.apply_async(_cross_validate,
[value_labels, estimator, params],
{'cv_timeout': cv_timeout,
'scoring': scoring, 'folds': folds})
cv_result_sets[name].append((params, result))
# Barrier synchronization
logger.info("Running gridsearch for {0} model/params pairs ..."
.format(sum(len(p_r) for p_r in cv_result_sets)))
grid_scores = []
for name, param_results in cv_result_sets.items():
for params, result in param_results:
scores = result.get() # This is a line that blocks
grid_scores.append((name, params, scores.mean(), scores.std()))
# Write the rest of the report! First, print the top 10 combinations
report.write("# Top scoring configurations\n")
grid_scores.sort(key=lambda gs: gs[2], reverse=True)
table = tabulate(
((name, round(mean_score, 3), round(std_score, 3),
format_params(params))
for name, params, mean_score, std_score in
grid_scores[:10]),
headers=["model", "mean(scores)", "std(scores)", "params"],
tablefmt="pipe"
)
report.write(table + "\n")
report.write("\n")
# Now print out scores for each model.
report.write("# Models\n")
for name, param_results in cv_result_sets.items():
report.write("## {0}\n".format(name))
param_scores = ((p, r.get()) for p, r in param_results)
param_stats = [(p, s.mean(), s.std()) for p, s in param_scores]
param_stats.sort(key=lambda v: v[1], reverse=True)
table = tabulate(
((round(mean_score, 3), round(std_score, 3),
format_params(params))
for params, mean_score, std_score in
param_stats),
headers=["mean(scores)", "std(scores)", "params"],
tablefmt="pipe"
)
report.write(table + "\n")
report.write("\n")
report.close()
def format_params(doc):
return ", ".join("{0}={1}".format(k, json.dumps(v))
for k, v in doc.items())
def _estimator_param_grid(params_config):
for name, config in params_config.items():
try:
EstimatorClass = yamlconf.import_module(config['class'])
estimator = EstimatorClass()
except Exception:
logger.warn("Could not load estimator {0}"
.format(config['class']))
logger.warn("Exception:\n" + traceback.format_exc())
continue
if not hasattr(estimator, "fit"):
logger.warn("Estimator {0} does not have a fit() method."
.format(config['class']))
continue
param_grid = grid_search.ParameterGrid(config['params'])
yield name, estimator, param_grid
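# Editor's note: a sketch of what <params-config> is expected to contain, based
# only on how this function consumes it ('class' is imported and instantiated,
# 'params' feeds grid_search.ParameterGrid). The model name, class path and
# values below are illustrative, not canonical revscoring defaults.
EXAMPLE_PARAMS_CONFIG = """
gradient_boosting:
  class: sklearn.ensemble.GradientBoostingClassifier
  params:
    n_estimators: [100, 300]
    max_depth: [3, 5]
    learning_rate: [0.1, 0.01]
"""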
def _cross_validate(value_labels, estimator, params, scoring="roc_auc",
folds=5, cv_timeout=None, verbose=False):
start = time.time()
feature_values, labels = (list(vect) for vect in zip(*value_labels))
estimator.set_params(**params)
try:
logger.debug("Running cross-validation for " +
"{0} with timeout of {1} seconds"
.format(estimator.__class__.__name__, cv_timeout))
with Timeout(cv_timeout):
scores = cross_validation.cross_val_score(
estimator, feature_values,
labels, scoring=scoring,
cv=folds)
duration = time.time() - start
logger.debug("Cross-validated {0} with {1} in {2} minutes: {3} ({4})"
.format(estimator.__class__.__name__,
format_params(params),
round(duration / 60, 3),
round(scores.mean(), 3),
round(scores.std(), 3)))
return scores
except Exception:
logger.warn("Could not cross-validate estimator {0}"
.format(estimator.__class__.__name__))
logger.warn("Exception:\n" + traceback.format_exc())
return numpy.array([0] * folds)
| mit |
lakshayg/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/categorical.py | 151 | 4269 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements preprocessing transformers for categorical variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
# pylint: disable=g-bad-import-order
from . import categorical_vocabulary
from ..learn_io.data_feeder import setup_processor_data_feeder
# pylint: enable=g-bad-import-order
class CategoricalProcessor(object):
"""Maps documents to sequences of word ids.
As a common convention, NaN values are handled as unknown tokens.
Both float('nan') and np.nan are accepted.
"""
def __init__(self, min_frequency=0, share=False, vocabularies=None):
"""Initializes a CategoricalProcessor instance.
Args:
min_frequency: Minimum frequency of categories in the vocabulary.
share: Share vocabulary between variables.
vocabularies: list of CategoricalVocabulary objects for each variable in
the input dataset.
Attributes:
vocabularies_: list of CategoricalVocabulary objects.
"""
self.min_frequency = min_frequency
self.share = share
self.vocabularies_ = vocabularies
def freeze(self, freeze=True):
"""Freeze or unfreeze all vocabularies.
Args:
freeze: Boolean, indicate if vocabularies should be frozen.
"""
for vocab in self.vocabularies_:
vocab.freeze(freeze)
def fit(self, x, unused_y=None):
"""Learn a vocabulary dictionary of all categories in `x`.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
unused_y: to match fit format signature of estimators.
Returns:
self
"""
x = setup_processor_data_feeder(x)
for row in x:
# Create vocabularies if not given.
if self.vocabularies_ is None:
# If not share, one per column, else one shared across.
if not self.share:
self.vocabularies_ = [
categorical_vocabulary.CategoricalVocabulary() for _ in row
]
else:
vocab = categorical_vocabulary.CategoricalVocabulary()
self.vocabularies_ = [vocab for _ in row]
for idx, value in enumerate(row):
# Nans are handled as unknowns.
if (isinstance(value, float) and math.isnan(value)) or value == np.nan:
continue
self.vocabularies_[idx].add(value)
if self.min_frequency > 0:
for vocab in self.vocabularies_:
vocab.trim(self.min_frequency)
self.freeze()
return self
def fit_transform(self, x, unused_y=None):
"""Learn the vocabulary dictionary and return indexies of categories.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
unused_y: to match fit_transform signature of estimators.
Returns:
x: iterable, [n_samples]. Category-id matrix.
"""
self.fit(x)
return self.transform(x)
def transform(self, x):
"""Transform documents to category-id matrix.
Converts categories to ids given the fitted vocabulary from `fit` or
one provided in the constructor.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
Yields:
x: iterable, [n_samples]. Category-id matrix.
"""
self.freeze()
x = setup_processor_data_feeder(x)
for row in x:
output_row = []
for idx, value in enumerate(row):
# Return <UNK> when it's Nan.
if (isinstance(value, float) and math.isnan(value)) or value == np.nan:
output_row.append(0)
continue
output_row.append(self.vocabularies_[idx].get(value))
yield np.array(output_row, dtype=np.int64)
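# Editor's sketch of typical usage (illustrative, not part of the original
# file): each row is one observation and each column one categorical variable;
# per-column values are mapped to small integer ids, with 0 reserved for
# unknown/NaN values.
def _example_categorical_usage():
    processor = CategoricalProcessor()
    rows = [["sunny", "high"], ["rainy", "low"], ["sunny", "low"]]
    ids = list(processor.fit_transform(rows))
    # e.g. [array([1, 1]), array([2, 2]), array([1, 2])] with dtype int64
    return ids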
| apache-2.0 |
musically-ut/statsmodels | examples/python/tsa_dates.py | 29 | 1169 |
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# ## Using Pandas
#
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
| bsd-3-clause |
jpzk/evopy | evopy/examples/experiments/cv_ppv_dsesscv/plot_precisions.py | 1 | 4256 | '''
This file is part of evopy.
Copyright 2012 - 2013, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from pickle import load
from copy import deepcopy
from numpy import matrix, log10, array
from scipy.stats import wilcoxon
from itertools import chain
from pylab import errorbar
from matplotlib.backends.backend_pdf import PdfPages
from evopy.strategies.ori_dses_svc_repair import ORIDSESSVCR
from evopy.strategies.ori_dses_svc import ORIDSESSVC
from evopy.strategies.ori_dses import ORIDSES
from evopy.simulators.simulator import Simulator
from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1
from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2
from evopy.problems.schwefels_problem_26 import SchwefelsProblem26
from evopy.problems.tr_problem import TRProblem
from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from sklearn.cross_validation import KFold
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.operators.scaling.scaling_dummy import ScalingDummy
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.or_combinator import ORCombinator
from evopy.operators.termination.accuracy import Accuracy
from evopy.operators.termination.generations import Generations
from evopy.operators.termination.convergence import Convergence
from evopy.helper.timeseries_aggregator import TimeseriesAggregator
import matplotlib.pyplot as plt
from setup import *
precisionfile = file("output/precision_file.save", "r")
precisions = load(precisionfile)
none = lambda x : type(x) != type(None)
for problem in precisions.keys():
figure_accs = plt.figure(figsize=(8,6), dpi=10, facecolor="w", edgecolor="k")
plt.xlabel("Generation")
plt.ylabel("Gemittelter Positiver Vorhersagewert")
plt.xlim([0, 50])
plt.ylim([0.0, 1.0])
o_colors = {
get_method_TR_none: "g",\
get_method_TR_nor: "k",\
get_method_TR_ssc: "#044977",\
get_method_SphereProblemR1_none: "g",\
get_method_SphereProblemR1_nor: "k",\
get_method_SphereProblemR1_ssc: "#044977",\
get_method_SphereProblemR2_none: "g",\
get_method_SphereProblemR2_nor: "k",\
get_method_SphereProblemR2_ssc: "#044977",\
get_method_Schwefel26_none: "g",\
get_method_Schwefel26_nor: "k",\
get_method_Schwefel26_ssc: "#044977"}
o_markers = {
get_method_TR_none: "x",\
get_method_TR_nor: "+",\
get_method_TR_ssc: ".",\
get_method_SphereProblemR1_none: "x",\
get_method_SphereProblemR1_nor: "+",\
get_method_SphereProblemR1_ssc: ".",\
get_method_SphereProblemR2_none: "x",\
get_method_SphereProblemR2_nor: "+",\
get_method_SphereProblemR2_ssc: ".",\
get_method_Schwefel26_none: "x",\
get_method_Schwefel26_nor: "+",\
get_method_Schwefel26_ssc: "."}
optimizers = precisions[problem].keys()
for index, optimizer in enumerate(optimizers):
precisions_po = precisions[problem][optimizer]
precisions_agg, errors_agg =\
TimeseriesAggregator(precisions_po).get_aggregate()
generations = range(0, len(precisions_agg))
eb = errorbar(generations,\
precisions_agg,\
marker=o_markers[optimizer],
color=o_colors[optimizer],\
ecolor="#CCCCCC",\
linestyle="none",
yerr=errors_agg)
pp = PdfPages("output/p_%s.pdf" % str(problem).split('.')[-1])
plt.savefig(pp, format='pdf')
pp.close()
| gpl-3.0 |
lakshayg/tensorflow | tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py | 12 | 2055 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fashion-MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy as np
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = os.path.join('datasets', 'fashion-mnist')
base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for given_file in files:
paths.append(
get_file(given_file, origin=base + given_file, cache_subdir=dirname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
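# Editor's sketch of typical usage (illustrative; downloads the archives on
# first call). Shapes follow from the reshape calls above; labels are uint8
# class ids in the range 0-9.
def _example_load():
    (x_train, y_train), (x_test, y_test) = load_data()
    assert x_train.shape == (60000, 28, 28) and y_train.shape == (60000,)
    assert x_test.shape == (10000, 28, 28) and y_test.shape == (10000,)
    return (x_train, y_train), (x_test, y_test)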
| apache-2.0 |
musically-ut/statsmodels | statsmodels/datasets/statecrime/data.py | 25 | 3128 | #! /usr/bin/env python
"""Statewide Crime Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """::
Number of observations: 51
Number of variables: 8
Variable name definitions:
state
All 50 states plus DC.
violent
Rate of violent crimes / 100,000 population. Includes murder, forcible
rape, robbery, and aggravated assault. Numbers for Illinois and
Minnesota do not include forcible rapes. Footnote included with the
American Statistical Abstract table reads:
"The data collection methodology for the offense of forcible
rape used by the Illinois and the Minnesota state Uniform Crime
Reporting (UCR) Programs (with the exception of Rockford, Illinois,
and Minneapolis and St. Paul, Minnesota) does not comply with
national UCR guidelines. Consequently, their state figures for
forcible rape and violent crime (of which forcible rape is a part)
are not published in this table."
murder
Rate of murders / 100,000 population.
hs_grad
Percent of population having graduated from high school or higher.
poverty
% of individuals below the poverty line
white
Percent of population that is one race - white only. From 2009 American
Community Survey
single
Calculated from the 2009 1-year American Community Survey obtained
from Census. Variable is Male householder, no wife present, family
household combined with Female householder, no husband present, family
household, divided by the total number of Family households.
urban
% of population in Urbanized Areas as of 2010 Census. Urbanized
Areas are area of 50,000 or more people."""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5],
dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5],
dtype=float, index_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/statecrime.csv', 'rb'),
delimiter=",", names=True, dtype=None)
return data
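# Editor's sketch of typical usage (illustrative, not part of the original
# file): both loaders return a Dataset whose endog/exog members are selected by
# the endog_idx/exog_idx arguments above; the pandas variant additionally
# indexes rows by the first column (index_idx=0).
def _example_statecrime_usage():
    dataset = load_pandas()
    return dataset.endog, dataset.exog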
| bsd-3-clause |
PAIR-code/recommendation-rudders | hyperbolic-rs/preprocess.py | 1 | 11964 | # Copyright 2017 The Rudders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app, flags
import pickle
import tensorflow as tf
import numpy as np
import random
from tqdm import tqdm
from pathlib import Path
from rudders.relations import Relations
from rudders.datasets import movielens, keen, amazon, amazon_relations, synopsis
from rudders.config import CONFIG
from rudders.utils import set_seed, sort_items_by_popularity, save_as_pickle, add_to_train_split
FLAGS = flags.FLAGS
flags.DEFINE_string('prep_id', default='foobar', help='Name of prep to store')
flags.DEFINE_string('item', default='amazon', help='Item to process: "keen", "gem", "ml-1m", "amazon" or "synopsis"')
flags.DEFINE_string('dataset_path', default='data/amazon', help='Path to raw dataset')
flags.DEFINE_string('amazon_reviews', default='Musical_Instruments_5.json.gz',
help='Name of the 5-core amazon reviews file')
flags.DEFINE_string('amazon_meta', default='meta_Musical_Instruments.json.gz',
help='Name of the 5-core amazon reviews file')
flags.DEFINE_string('item_item_file', default='Musical_Instruments_th0.6_cosdistances.pickle',
help='Path to the item-item distance file')
flags.DEFINE_boolean('plot_graph', default=False, help='Plots the user-item graph')
flags.DEFINE_boolean('shuffle', default=False, help='Whether to shuffle the interactions or not')
flags.DEFINE_boolean('add_extra_relations', default=True, help='For the amazon dataset, adds extra relations')
flags.DEFINE_boolean('export_splits', default=True, help='Exports (user_id, item_id) pairs of all splits')
flags.DEFINE_integer('min_user_interactions', default=5,
help='Keens users with less than min_user_interactions are filtered')
flags.DEFINE_integer('min_item_interactions', default=2,
help='Keens/gems with less than this interactions are filtered')
flags.DEFINE_integer('max_item_interactions', default=150,
help='Keens/gems with more than this interactions are filtered')
flags.DEFINE_integer('similarity_items_per_item', default=10, help='Amount of similarity items to add per item')
flags.DEFINE_integer('seed', default=42, help='Random seed')
flags.DEFINE_integer('filter_most_popular', default=-1,
help='Filters out most popular keens/gems. If -1 it does not filter')
def plot_graph(samples):
"""Plot user-item graph, setting different colors for items and users."""
import networkx as nx
import matplotlib.pyplot as plt
graph = nx.Graph()
for uid, ints in samples.items():
for iid in ints:
graph.add_edge(uid, iid)
color_map = ["red" if node in samples else "blue" for node in graph]
fig = plt.figure()
pos = nx.spring_layout(graph, iterations=100)
nx.draw(graph, pos, ax=fig.add_subplot(111), node_size=20, node_color=color_map)
plt.show()
def map_raw_ids_to_sequential_ids(samples):
"""
For each unique user or item id, this function creates a mapping to a sequence of numbers starting at 0.
This will be the index of the embeddings in the model.
Items ids will be from 0 to n_items - 1.
Users ids will be from n_items to n_items + n_users - 1
This condition is required to later build the distance matrix
:param samples: dict of <user_id1>: [<item_id1>, <item_id2>, ...]
:return: dicts of {<user_idX>: indexY} and {<item_idX>: indexW}
"""
uid2id, iid2id = {}, {}
sorted_samples = sorted(samples.items(), key=lambda x: x[0])
# first sets items ids only
for _, ints in sorted_samples:
sorted_ints = sorted(ints)
for iid in sorted_ints:
if iid not in iid2id:
iid2id[iid] = len(iid2id)
# users ids come after item ids
for uid, _ in sorted_samples:
if uid not in uid2id:
uid2id[uid] = len(uid2id) + len(iid2id)
return uid2id, iid2id
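# Editor's worked example of the mapping contract documented above (items take
# ids 0..n_items-1, users follow), using hypothetical raw ids.
def _example_id_mapping():
    samples = {"user_b": ["item_y", "item_x"], "user_a": ["item_x"]}
    uid2id, iid2id = map_raw_ids_to_sequential_ids(samples)
    assert iid2id == {"item_x": 0, "item_y": 1}
    assert uid2id == {"user_a": 2, "user_b": 3}
    return uid2id, iid2id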
def create_splits(samples, relation_id, do_random=False, seed=42):
"""
Splits the (user, item) dataset into train, dev and test.
:param samples: Dict of sorted examples.
:param relation_id: number that identifies the user-item interaction relation to form the triplets
:param do_random: Bool whether to extract dev and test by random sampling. If False, dev, test are the last two
items per user.
:return: dictionary with the original 'samples' plus 'train', 'dev' and 'test'
splits as numpy arrays of (user_id, relation_id, item_id) triplets.
"""
train, dev, test = [], [], []
for uid, ints in samples.items():
if do_random:
random.seed(seed)
random.shuffle(ints)
if len(ints) >= 3:
test.append((uid, relation_id, ints[-1]))
dev.append((uid, relation_id, ints[-2]))
for iid in ints[:-2]:
train.append((uid, relation_id, iid))
else:
for iid in ints:
train.append((uid, relation_id, iid))
return {
'samples': samples,
'train': np.array(train).astype('int64'),
'dev': np.array(dev).astype('int64'),
'test': np.array(test).astype('int64')
}
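# Editor's worked example (illustrative): with do_random=False the last
# interaction of each user becomes the test triplet, the second-to-last becomes
# dev, and the rest go to train; users with fewer than 3 interactions only
# contribute to train. relation_id=0 stands in for Relations.USER_ITEM.value.
def _example_create_splits():
    samples = {10: [0, 1, 2, 3], 11: [4, 5]}  # already-mapped ids
    data = create_splits(samples, relation_id=0)
    # train: (10, 0, 0), (10, 0, 1), (11, 0, 4), (11, 0, 5)
    # dev:   (10, 0, 2)            test: (10, 0, 3)
    return data["train"], data["dev"], data["test"]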
def load_item_item_distances(item_item_file_path):
"""Loads item-item distances that were precomputed with item_graph.py."""
print(f"Loading data from {item_item_file_path}")
with tf.io.gfile.GFile(str(item_item_file_path), 'rb') as f:
data = pickle.load(f)
return data["item_item_distances"]
def build_item_item_triplets(item_item_distances_dict, iid2id, top_k):
"""
Builds item item triples from the item-item distances
:param item_item_distances_dict: dict of src_iid: [(dst_iid, distance)]
:param iid2id: dict of item ids
:param top_k: adds top_k items per item at most
:return:
"""
triplets = set()
for src_iid, dists in tqdm(item_item_distances_dict.items(), desc="item_item_triplets"):
if src_iid not in iid2id:
continue
src_id = iid2id[src_iid]
sorted_dists = sorted(dists, key=lambda t: t[1])
added = 0
for dst_iid, cos_dist in sorted_dists:
if dst_iid not in iid2id or cos_dist > 0.3:
continue
dst_id = iid2id[dst_iid]
if cos_dist <= 0.1:
triplets.add((src_id, Relations.SEM_HIGH_SIM.value, dst_id))
elif 0.2 >= cos_dist > 0.1:
triplets.add((src_id, Relations.SEM_MEDIUM_SIM.value, dst_id))
else: # 0.3 >= cos_dist > 0.2
triplets.add((src_id, Relations.SEM_LOW_SIM.value, dst_id))
added += 1
if added >= top_k:
break
return list(triplets)
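# Editor's worked example (illustrative): cosine distances are bucketed into
# the three similarity relations (<= 0.1 high, (0.1, 0.2] medium, (0.2, 0.3]
# low), anything above 0.3 is dropped, and at most top_k neighbours are kept
# per source item.
def _example_item_item_triplets():
    iid2id = {"a": 0, "b": 1, "c": 2, "d": 3}
    dists = {"a": [("b", 0.05), ("c", 0.15), ("d", 0.45)]}
    # expected: (0, SEM_HIGH_SIM, 1) and (0, SEM_MEDIUM_SIM, 2); ("a", "d") is
    # skipped because 0.45 > 0.3
    return build_item_item_triplets(dists, iid2id, top_k=10)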
def export_splits(data, to_save_dir, prep_id):
"""Exports (user_id, item_id) pairs of all splits splits"""
split_names = ["train", "dev", "test"]
id2uid, id2iid = data["id2uid"], data["id2iid"]
for split_name in split_names:
split = data[split_name]
if split_name == "train":
split = [(uid, r, iid) for uid, r, iid in split if r == Relations.USER_ITEM.value]
lines = [f"{id2uid[u_id]},{id2iid[i_id]}\n" for u_id, _, i_id in split]
with open(to_save_dir / f"{prep_id}_ui_{split_name}.csv", "w") as f:
f.writelines(lines)
def main(_):
set_seed(FLAGS.seed, set_tf_seed=True)
dataset_path = Path(FLAGS.dataset_path)
if FLAGS.item == "keen":
samples = keen.load_user_keen_interactions(dataset_path, min_user_ints=FLAGS.min_user_interactions,
min_item_ints=FLAGS.min_item_interactions,
max_item_ints=FLAGS.max_item_interactions)
iid2name = keen.build_iid2title(item_id_key="keen_id", item_title_key="keen_title")
elif FLAGS.item == "gem":
samples = keen.load_keen_gems_interactions(dataset_path, min_keen_keen_edges=2, max_keen_keen_edges=1000,
min_overlapping_users=2,
min_keen_ints=FLAGS.min_user_interactions,
min_item_ints=FLAGS.min_item_interactions,
max_item_ints=FLAGS.max_item_interactions)
iid2name = keen.build_iid2title(item_id_key="gem_id", item_title_key="gem_link_title")
elif FLAGS.item == "ml-1m":
samples = movielens.movielens_to_dict(dataset_path)
iid2name = movielens.build_movieid2title(dataset_path)
elif "amazon" in FLAGS.item:
samples = amazon.load_interactions(dataset_path / FLAGS.amazon_reviews)
iid2name = amazon.build_itemid2name(dataset_path / FLAGS.amazon_meta)
elif FLAGS.item == "synopsis":
samples = synopsis.synopsis_to_dict(dataset_path)
iid2name = synopsis.build_movieid2title(dataset_path)
else:
raise ValueError(f"Unknown item: {FLAGS.item}")
if FLAGS.filter_most_popular > 0:
print(f"Filtering {FLAGS.filter_most_popular} most popular items")
sorted_items = sort_items_by_popularity(samples)
iid_to_filter = set([iid for iid, _ in sorted_items[:FLAGS.filter_most_popular]])
samples = {uid: list(set(ints) - iid_to_filter) for uid, ints in samples.items()}
samples = {uid: ints for uid, ints in samples.items() if ints}
if FLAGS.plot_graph:
plot_graph(samples)
return
uid2id, iid2id = map_raw_ids_to_sequential_ids(samples)
id_samples = {}
for uid, ints in samples.items():
if FLAGS.item == "keen" or FLAGS.item == "gem":
ints = sorted(ints)
id_samples[uid2id[uid]] = [iid2id[iid] for iid in ints]
data = create_splits(id_samples, Relations.USER_ITEM.value, do_random=FLAGS.shuffle, seed=FLAGS.seed)
data["iid2name"] = {iid: iid2name.get(iid, "None") for iid in iid2id}
data["id2uid"] = {v: k for k, v in uid2id.items()}
data["id2iid"] = {v: k for k, v in iid2id.items()}
print(f"User item interaction triplets: {len(data['train'])}")
n_entities = len(uid2id) + len(iid2id)
# if there is an item-item graph, we preprocess it
if FLAGS.item_item_file:
item_item_distances_dict = load_item_item_distances(dataset_path / FLAGS.item_item_file)
item_item_triplets = build_item_item_triplets(item_item_distances_dict, iid2id, FLAGS.similarity_items_per_item)
add_to_train_split(data, item_item_triplets)
print(f"Added item-item similarity triplets: {len(item_item_triplets)}")
if "amazon" in FLAGS.item and FLAGS.add_extra_relations:
print("Adding extra relations")
n_entities = amazon_relations.load_relations(dataset_path / FLAGS.amazon_meta, data, iid2id, n_entities)
data["n_entities"] = n_entities
# creates directories to save preprocessed data
print(f"Final training split: {len(data['train'])} triplets")
prep_path = Path(CONFIG["string"]["prep_dir"][1])
prep_path.mkdir(parents=True, exist_ok=True)
to_save_dir = prep_path / FLAGS.item
to_save_dir.mkdir(parents=True, exist_ok=True)
save_as_pickle(to_save_dir / f'{FLAGS.prep_id}.pickle', data)
if FLAGS.export_splits:
export_splits(data, to_save_dir, FLAGS.prep_id)
print("Done!")
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
openplans/shareabouts-api | src/sa_api_v2/tasks.py | 1 | 10690 |
import requests
import ujson as json
from celery import shared_task
from celery.result import AsyncResult
from django.db import transaction
from django.test.client import RequestFactory
from django.utils.timezone import now
from itertools import chain
#from social.apps.django_app.default.models import UserSocialAuth
from .models import DataSnapshotRequest, DataSnapshot, DataSet, User, Place, Submission
from .serializers import SimplePlaceSerializer, SimpleSubmissionSerializer, SimpleDataSetSerializer
from .renderers import CSVRenderer, JSONRenderer, GeoJSONRenderer
import logging
log = logging.getLogger(__name__)
# =========================================================
# Generating snapshots
#
def generate_bulk_content(dataset, submission_set_name, **flags):
renderer_classes = {
'csv': CSVRenderer,
'json': GeoJSONRenderer if submission_set_name == 'places' else JSONRenderer
}
if submission_set_name == 'places':
submissions = dataset.places.all()
serializer = SimplePlaceSerializer(submissions, many=True)
else:
submissions = dataset.submissions.filter(set_name=submission_set_name)
serializer = SimpleSubmissionSerializer(submissions, many=True)
# Construct a request for the serializer context
r_data = {}
for flag_attr, flag_val in flags.items():
if flag_val: r_data[flag_attr] = 'true'
r = RequestFactory().get('', data=r_data)
r.get_dataset = lambda: dataset
# Render the data in each format
serializer.context['request'] = r
data = serializer.data
content = {}
for format, renderer_class in list(renderer_classes.items()):
renderer = renderer_class()
content[format] = renderer.render(data)
return content
@shared_task
def store_bulk_data(request_id):
task_id = store_bulk_data.request.id
log.info('Creating a snapshot request with task id %s' % (task_id,))
datarequest = DataSnapshotRequest.objects.get(pk=request_id)
datarequest.guid = task_id
datarequest.save()
# Generate the content
content = generate_bulk_content(
datarequest.dataset,
datarequest.submission_set,
include_submissions=datarequest.include_submissions,
include_private=datarequest.include_private,
include_invisible=datarequest.include_invisible)
# Store the information
bulk_data = DataSnapshot(
request=datarequest,
csv=content['csv'],
json=content['json'])
bulk_data.save()
datarequest.fulfilled_at = now()
datarequest.save()
return task_id
@shared_task
def bulk_data_status_update(uuid):
"""
A callback task that updates the status of a data snapshot request, whether
successful or not.
"""
taskresult = AsyncResult(uuid)
datarequest = DataSnapshotRequest.objects.get(guid=uuid)
datarequest.status = taskresult.status.lower()
datarequest.save()
@shared_task
def clone_related_dataset_data(orig_dataset_id, new_dataset_id):
qs = DataSet.objects.select_related('owner')\
.filter(id__in=(orig_dataset_id, new_dataset_id))\
.prefetch_related('things',
'things__place',
'things__place__dataset',
'things__place__submitter',
'things__place__submissions',
'things__place__submissions__dataset',
'things__place__submissions__submitter',
'permissions',
'groups',
'groups__submitters',
'groups__permissions',
'keys',
'keys__permissions',
'origins',
'origins__permissions',
)
datasets = list(qs)
if datasets[0].id == orig_dataset_id:
orig_dataset, new_dataset = datasets
else:
new_dataset, orig_dataset = datasets
with transaction.atomic():
orig_dataset.clone_related(onto=new_dataset)
# =========================================================
# Loading a dataset
#
def get_twitter_extra_data(user_data):
return {
'id': user_data.get('provider_id'),
'profile_image_url': user_data.get('avatar_url'),
'access_token': {
'screen_name': user_data.get('username'),
'oauth_token_secret': 'abc',
'oauth_token': '123',
'user_id': user_data.get('provider_id')
},
'name': user_data.get('name')
}
def get_facebook_extra_data(user_data):
return {
'access_token': 'abc123',
'picture': {
"data": {
"url": user_data.get('avatar_url'),
}
},
"id": user_data.get('provider_id'),
"name": user_data.get('name'),
}
def get_or_create_user(user_data, users_map):
if user_data is None:
return
# Check whether the user is already cached
username = user_data.get('username')
user = users_map.get(username)
if user:
return user
# Create and cache the user
user = User.objects.create(username=username, password='!')
users_map[username] = user
# Create a social auth entry for the user, if appropriate
provider = user_data.get('provider_type')
uid = user_data.get('provider_id')
if provider and uid:
UserSocialAuth.objects.create(
user=user,
provider=provider,
uid=uid,
extra_data=
get_twitter_extra_data(user_data)
if provider == 'twitter' else
get_facebook_extra_data(user_data)
)
def preload_users(data):
"""
Construct a mapping from usernames to users for Users that already exist
in the API.
"""
usernames = set()
def collect_username(data):
submitter_data = data.get('submitter')
if submitter_data:
usernames.add(submitter_data.get('username'))
for place_data in data.get('features', []):
collect_username(place_data['properties'])
for _, submissions_data in place_data['properties'].get('submission_sets', {}).items():
for submission_data in submissions_data:
collect_username(submission_data)
users = User.objects.filter(username__in=usernames)
users_map = dict([(user.username, user) for user in users])
return users_map
def list_errors(errors):
errors_list = []
for key, l in list(errors.items()):
if isinstance(l, list):
for msg in l:
errors_list.append('%s: %s' % (key, str(msg)))
else:
msg = l
errors_list.append('%s: %s' % (key, str(msg)))
return errors_list
@shared_task
def load_dataset_archive(dataset_id, archive_url):
dataset = DataSet.objects.get(id=dataset_id)
archive_response = requests.get(archive_url)
if archive_response.status_code == 200:
data = archive_response.json()
# Preload users
users_map = preload_users(data)
with transaction.atomic():
# Construct the dataset from metadata
metadata = data.get('metadata')
if metadata:
metadata.pop('id', None)
metadata.pop('owner', None)
serializer = SimpleDataSetSerializer(dataset, data=data.get('metadata'))
assert serializer.is_valid(), list_errors(serializer.errors)
serializer.save()
# Create a stub view object to use in serializer contexts.
class Stub (object): pass
view = Stub()
view.request = Stub()
view.request.META = {'HTTP_X_SHAREABOUTS_SILENT': 'True'}
view.request.user = Stub()
view.request.user.is_authenticated = lambda: False
# Construct each place and submission individually
for place_data in data.get('features'):
place_data.pop('type', None)
place_data.update(place_data.pop('properties', {}))
place_data.pop('id', None)
place_data.pop('dataset', None)
place_data.pop('created_datetime', None)
place_data.pop('updated_datetime', None)
submission_sets_data = place_data.pop('submission_sets', {})
submitter_data = place_data.pop('submitter', None)
serializer_context = {'view': view, 'request': view.request}
serializer = SimplePlaceSerializer(data=place_data, context=serializer_context)
assert serializer.is_valid(), list_errors(serializer.errors)
place = Place()
for attr, value in serializer.validated_data.items():
setattr(place, attr, value)
place.dataset = dataset
place.submitter = get_or_create_user(submitter_data, users_map)
place.save(silent=True, reindex=False)
for set_name, submissions_data in submission_sets_data.items():
for submission_data in submissions_data:
submission_data.pop('id', None)
submission_data.pop('place', None)
submission_data.pop('dataset', None)
submission_data.pop('attachments', None)
submission_data.pop('created_datetime', None)
submission_data.pop('updated_datetime', None)
submitter_data = submission_data.pop('submitter', None)
serializer_context = {'view': view, 'request': view.request}
serializer = SimpleSubmissionSerializer(data=submission_data, context=serializer_context)
assert serializer.is_valid(), list_errors(serializer.errors)
submission = Submission()
for attr, value in serializer.validated_data.items():
setattr(submission, attr, value)
submission.set_name = set_name
submission.place = place
submission.dataset = dataset
submission.submitter = get_or_create_user(submitter_data, users_map)
submission.save(silent=True, reindex=False)
dataset.reindex()
# Load meta-data like permissions and such
# metadata = data.get('metadata')
# for permission_data in metadata.get('permissions'):
| gpl-3.0 |
kastnerkyle/pylearn2 | pylearn2/datasets/stl10.py | 1 | 5304 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils import contains_nan
class STL10(dense_design_matrix.DenseDesignMatrix):
"""
The STL-10 dataset
Adam Coates, Honglak Lee, Andrew Y. Ng An Analysis of Single Layer
Networks in Unsupervised Feature Learning AISTATS, 2011
http://www.stanford.edu/~acoates//stl10/
When reporting results on this dataset, you are meant to use a somewhat
unusual evaluation procedure.
Use STL10(which_set='train') to load the training set. Then restrict the
training set to one of the ten folds using the restrict function below. You
must then train only on the data from that fold.
For the test set, report the average test set performance over the ten
trials obtained by training on each of the ten folds.
The folds here do not define the splits you should use for cross
validation. You are free to make your own split within each fold.
Parameters
----------
which_set : WRITEME
center : WRITEME
example_range : WRITEME
"""
def __init__(self, which_set, center=False, example_range=None):
"""
.. todo::
WRITEME
"""
if which_set == 'train':
train = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/train.mat')
# Load the class names
self.class_names = [array[0].encode('utf-8')
for array in train['class_names'][0]]
# Load the fold indices
fold_indices = train['fold_indices']
assert fold_indices.shape == (1, 10)
self.fold_indices = np.zeros((10, 1000), dtype='uint16')
for i in xrange(10):
indices = fold_indices[0, i]
assert indices.shape == (1000, 1)
assert indices.dtype == 'uint16'
self.fold_indices[i, :] = indices[:, 0]
# The data is stored as uint8
# If we leave it as uint8, it will cause the CAE to silently fail
# since theano will treat derivatives wrt X as 0
X = np.cast['float32'](train['X'])
assert X.shape == (5000, 96 * 96 * 3)
if example_range is not None:
X = X[example_range[0]:example_range[1], :]
# this is uint8
y = train['y'][:, 0]
assert y.shape == (5000,)
elif which_set == 'test':
test = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/test.mat')
# Load the class names
self.class_names = [array[0].encode('utf-8')
for array in test['class_names'][0]]
# The data is stored as uint8
# If we leave it as uint8, it will cause the CAE to silently fail
# since theano will treat derivatives wrt X as 0
X = np.cast['float32'](test['X'])
assert X.shape == (8000, 96 * 96 * 3)
if example_range is not None:
X = X[example_range[0]:example_range[1], :]
# this is uint8
y = test['y'][:, 0]
assert y.shape == (8000,)
elif which_set == 'unlabeled':
unlabeled = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/'
'unlabeled.mat')
X = unlabeled['X']
# this file is stored in HDF format, which transposes everything
assert X.shape == (96 * 96 * 3, 100000)
assert X.dtype == 'uint8'
if example_range is None:
X = X.value
else:
X = X.value[:, example_range[0]:example_range[1]]
X = np.cast['float32'](X.T)
unlabeled.close()
y = None
else:
raise ValueError('"' + which_set + '" is not an STL10 dataset. '
'Recognized values are "train", "test", and '
'"unlabeled".')
if center:
X -= 127.5
view_converter = dense_design_matrix.DefaultViewConverter((96, 96, 3))
super(STL10, self).__init__(X=X, y=y, view_converter=view_converter)
for i in xrange(self.X.shape[0]):
mat = X[i:i + 1, :]
topo = self.get_topological_view(mat)
for j in xrange(topo.shape[3]):
temp = topo[0, :, :, j].T.copy()
topo[0, :, :, j] = temp
mat = self.get_design_matrix(topo)
X[i:i + 1, :] = mat
assert not contains_nan(self.X)
def restrict(dataset, fold):
"""
Restricts the dataset to use the specified fold (1 to 10).
dataset should be the training set.
"""
fold_indices = dataset.fold_indices
assert fold_indices.shape == (10, 1000)
idxs = fold_indices[fold, :] - 1
dataset.X = dataset.X[idxs, :].copy()
assert dataset.X.shape[0] == 1000
dataset.y = dataset.y[idxs, ...].copy()
assert dataset.y.shape[0] == 1000
return dataset
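# Editor's sketch (illustrative) of the evaluation protocol described in the
# STL10 docstring: train on each of the ten folds of the training set and
# report the average test-set performance. `train_and_score` is a hypothetical
# user-supplied callable, not part of pylearn2; folds are indexed 0..9 to match
# fold_indices above.
def _example_fold_evaluation(train_and_score):
    test_set = STL10(which_set='test')
    scores = []
    for fold in xrange(10):
        fold_train = restrict(STL10(which_set='train'), fold)
        scores.append(train_and_score(fold_train, test_set))
    return sum(scores) / float(len(scores))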
| bsd-3-clause |
nicproulx/mne-python | examples/connectivity/plot_mixed_source_space_connectity.py | 3 | 6976 | """
===============================================================================
Compute mixed source space connectivity and visualize it using a circular graph
===============================================================================
This example computes the all-to-all connectivity between 75 regions in
a mixed source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions.
"""
# Author: Annalisa Pascarella <a.pascarella@iac.cnr.it>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
from mne import setup_volume_source_space, setup_source_space
from mne import make_forward_solution
from mne.io import read_raw_fif
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
# Set dir
data_path = sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_raw = data_dir + '/sample_audvis_filt-0-40_raw.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_cov = data_dir + '/ernoise-cov.fif'
fname_event = data_dir + '/sample_audvis_filt-0-40_raw-eve.fif'
# List of sub structures we are interested in. We select only the
# sub structures we want to include in the source space
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Setup a surface-based source space
src = setup_source_space(subject, fname=None, subjects_dir=subjects_dir,
spacing='oct6', add_dist=False)
# Setup a volume source space
# set pos=7.0 for speed issue
vol_src = setup_volume_source_space(subject, mri=fname_aseg,
pos=7.0,
bem=fname_model,
volume_label=labels_vol,
subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src
# compute the fwd matrix
fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False,
n_jobs=1)
# Load data
raw = read_raw_fif(fname_raw, preload=True)
noise_cov = mne.read_cov(fname_cov)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute inverse solution and for each epoch
snr = 1.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
# Compute inverse operator
inverse_operator = make_inverse_operator(raw.info, fwd, noise_cov,
loose=None, depth=None,
fixed=False)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, inv_method,
pick_ori=None, return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(subject, parc=parc,
subjects_dir=subjects_dir)
# Average the source estimates within each label of the cortical parcellation
# and each sub structures contained in the src space
# If mode = 'mean_flip' this option is used only for the cortical label
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels_parc, src,
mode='mean_flip',
allow_empty=True,
return_generator=False)
# We compute the connectivity in the alpha band and plot it using a circular
# graph layout
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# We create a list of Label containing also the sub structures
labels_aseg = mne.get_volume_labels_from_src(src, subject, subjects_dir)
labels = labels_parc + labels_aseg
# read colors
node_colors = [label.color for label in labels]
# We reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
rh_labels = [name for name in label_names if name.endswith('rh')]
# Get the y-location of the label
label_ypos_lh = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos_lh.append(ypos)
try:
idx = label_names.index('Brain-Stem')
ypos = np.mean(labels[idx].pos[:, 1])
lh_labels.append('Brain-Stem')
label_ypos_lh.append(ypos)
except ValueError:
pass
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos_lh, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels
if label != 'Brain-Stem' and label[:-2] + 'rh' in rh_labels]
# Save the plot order
node_order = list()
node_order = lh_labels[::-1] + rh_labels
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) // 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
conmat = con[:, :, 0]
plot_connectivity_circle(conmat, label_names, n_lines=300,
node_angles=node_angles, node_colors=node_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)')
# Uncomment the following line to save the figure
'''
import matplotlib.pyplot as plt
plt.savefig('circle.png', facecolor='black')
'''
| bsd-3-clause |
tabhitmy/MLTF | WORKFLOW/code/python_code/sklearnTrainer.py | 1 | 12746 | # sklearnTrainer
import numpy
import numpy as np
import copy
from toolkitJ import cell2dmatlab_jsp
import matplotlib as mpl
from matplotlib.font_manager import FontProperties
zhfont = FontProperties(fname="/usr/share/fonts/cjkuni-ukai/ukai.ttc")  # Chinese font for displaying Chinese text in figures
mpl.use('Agg')
import pprint
from sklearn.externals.six import StringIO
# import pydot
import sklearn.model_selection as skmdls
import sklearn.ensemble as skemb
import sklearn.tree as sktree
import sklearn.linear_model as sklinmdl
import sklearn.discriminant_analysis as skdisa
import sklearn.svm as sksvm
import sklearn.naive_bayes as sknb
import GVal
from controlPanelSubFunc_NFDA_J import dVM
from trainerSubFunc_NFDA_J import *
###################################
# Classifier Subfunction ################
###################################
def adaboost(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
weakClf_list = {
27: 'decisionTree',
271: 'decisionTree'
}
clf = skemb.AdaBoostClassifier(sktree.DecisionTreeClassifier(max_depth=2, min_samples_split=30, min_samples_leaf=5),
algorithm=dVM[2702][2], n_estimators=dVM[2700][2], learning_rate=dVM[2701][2], random_state=dVM[2703][2])
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def lda(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skdisa.LinearDiscriminantAnalysis(solver=dVM[2300][2], n_components=dVM[2303][2])
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def qda(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skdisa.QuadraticDiscriminantAnalysis()
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def naiveBayes(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clfname_list = {
25: sknb.GaussianNB,
251: sknb.GaussianNB,
252: sknb.MultinomialNB,
253: sknb.BernoulliNB,
}
clf = clfname_list[classifier_num]()
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def svmKernel(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
kernelname_list = {
22: 'rbf',
221: 'rbf',
222: 'poly',
223: 'sigmoid',
        224: 'precomputed'  # sklearn's kernel name is 'precomputed' (requires a Gram matrix as X)
}
kernelname = kernelname_list[classifier_num]
clf = sksvm.SVC(C=0.1, kernel=kernelname, degree=3, gamma=0.7)
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def svmLinear(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sksvm.LinearSVC(penalty=dVM[2100][2], loss=dVM[2101][2],
dual=dVM[2102][2], tol=dVM[2103][2], C=dVM[2104][2])
# clf = sksvm.LinearSVC()
clf.fit(X_tra, y_tra, sample_weight=weights)
cx = clf.coef_[0]
clfc = np.around(cx, decimals=2)
    print('### Feature coefficients of the linear SVM (' + str(dVM[2100][2]) + ' penalty): ' + str(clfc))
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def linearRegression(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
# Not Applicable at this moment
# weights = Y_train_raw[:, 0]
# weights[np.nonzero(weights == 0)[0]] = 1
# weights = weights / 7
# y_tra, y_val, X_val = dataRegulation(y_tra, y_val, X_val, index_no)
# clf = sklinmdl.LinearRegression()
# clf.fit(X_tra, y_tra, sample_weight=weights)
# score = clf.score(X_tra, y_tra, sample_weight=weights)
# print()
# Z = clf.predict(X_val)
# print(Z.shape)
# TP = np.nonzero(np.logical_and(Z == 1, y_val == 1))[0]
# print(TP)
# print(TP.shape)
# print(max(weights))
# print(min(weights))
    # The draft above is entirely commented out, so clf/score/FRAP are never
    # defined; fail explicitly instead of raising a NameError at return time.
    raise NotImplementedError('linearRegression is not applicable at this moment')
##
def sgdClassifier(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sklinmdl.SGDClassifier(loss='hinge', penalty='l2', alpha=0.1)
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def logiRegression(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sklinmdl.LogisticRegression(penalty=dVM[3000][2], dual=dVM[3001][2], tol=dVM[3002][2],
C=dVM[3003][2], random_state=dVM[3007][2],
solver=dVM[3008][2], max_iter=dVM[3009][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def decisionTree(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sktree.DecisionTreeClassifier(criterion=dVM[3100][2], splitter=dVM[3101][2],
max_depth=dVM[3102][2], min_samples_split=dVM[3103][2],
min_samples_leaf=dVM[3104][2], max_features=dVM[3106][2],
random_state=dVM[3107][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
path = GVal.getPARA('path_PARA')
with open(path['fig_path'] + 'dtclf.dot', 'w') as f:
f = sktree.export_graphviz(clf, out_file=f, class_names=['0', '1'])
# sktree.export_graphviz(clf, out_file=path['fig_path'] + 'tree.dot')
# exit()
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def randomForest(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
# http://blog.csdn.net/xuxiatian/article/details/54410086
clf = skemb.RandomForestClassifier(n_estimators=dVM[3200][2],
criterion=dVM[3201][2], max_features=dVM[3202][2],
max_depth=dVM[3203][2], min_samples_split=dVM[3204][2],
min_samples_leaf=dVM[3205][2], min_weight_fraction_leaf=dVM[3206][2],
random_state=dVM[3213][2])
# GVal.show('dVM_PARA')
clf.fit(X_tra, y_tra, sample_weight=weights)
# print(clf.get_params())
# print(clf)
path = GVal.getPARA('path_PARA')
i_tree = 0
for tree_in_forest in clf.estimators_:
with open(path['fig_path'] + '/RF/tree_' + str(i_tree) + '.dot', 'w') as my_file:
my_file = sktree.export_graphviz(tree_in_forest, out_file=my_file, class_names=['0', '1'])
i_tree = i_tree + 1
return processLearning(clf, X_tra, y_tra, X_val, y_val)
def bagging(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skemb.BaggingClassifier(base_estimator=sktree.DecisionTreeClassifier(max_depth=2, min_samples_split=30, min_samples_leaf=5),
n_estimators=dVM[3300][2], max_samples=dVM[3301][2], max_features=dVM[3302][2],
bootstrap=dVM[3303][2], random_state=dVM[3308][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
def voting(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
#
classifier_list = GVal.getPARA('classifier_list_PARA')
# dVM[3400] = ['estimators', [21, 23, 25, 30, 31], [21, 23, 25, 30, 31]]
estims = []
for i in range(len(dVM[3400][2])):
clf_temp = (classifier_list[dVM[3400][2][i]][1], classifier_list[int(str(dVM[3400][2][i])[0:2])][0](X_tra, y_tra, X_val, y_val, index_no, dVM[3400][2][i])[0])
estims.append(clf_temp)
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skemb.VotingClassifier(estimators=estims, voting=dVM[3401][2])
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
def gradboost(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skemb.GradientBoostingClassifier(loss=dVM[3500][2], learning_rate=dVM[3501][2],
n_estimators=dVM[3502][2], max_depth=dVM[3503][2], criterion=dVM[3504][2],
min_samples_split=dVM[3505][2], min_samples_leaf=dVM[3506][2],
subsample=dVM[3508][2], random_state=dVM[3515][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
###################################
# Main #############################
###################################
def sklearnTrainer(classifier_num, X_train_raw, Y_train_raw, X_valid_raw, Y_valid_raw, path):
feature_index = GVal.getPARA('feature_index_PARA')
X, y, X_valid, y_valid, index_no = dataSetPreparation(feature_index, X_train_raw, Y_train_raw, X_valid_raw, Y_valid_raw)
classifier_list = {
21: [svmLinear, 'Linear SVM', []],
22: [svmKernel, 'Kernel SVM (Default:rbf)'],
221: [svmKernel, 'Kernel SVM (rbf)'],
222: [svmKernel, 'Kernel SVM (poly)'],
223: [svmKernel, 'Kernel SVM (sigmoid)'],
        224: [svmKernel, 'Kernel SVM (precomputed)'],
23: [lda, 'LDA'],
24: [qda, 'QDA'],
25: [naiveBayes, 'Naive Bayes (Default: Gaussian)'],
251: [naiveBayes, 'Naive Bayes (Guassian)'],
252: [naiveBayes, 'Naive Bayes (Multinominal)'],
253: [naiveBayes, 'Naive Bayes (Bernoulli)'],
# 26: neuralNetwork,
27: [adaboost, 'Adaboost'],
271: [adaboost, 'Adaboost(WC:DecisionTree)'],
# 28: [linearRegression, 'Linear Regression'],
29: [sgdClassifier, 'SGD Classifier'],
30: [logiRegression, 'Logistic Regression'],
31: [decisionTree, 'Decision Tree'],
32: [randomForest, 'Random Forest'],
33: [bagging, 'bagging with DT'],
34: [voting, 'Voter'],
35: [gradboost, 'Gradient Tree Boosting']
}
GVal.setPARA('classifier_list_cache', classifier_list)
# classifier serial code: [[model], [training score], [predicting rate]]
clf_cache = {
21: cell2dmatlab_jsp([1], 1, []),
22: cell2dmatlab_jsp([1], 1, []),
221: cell2dmatlab_jsp([1], 1, []),
222: cell2dmatlab_jsp([1], 1, []),
223: cell2dmatlab_jsp([1], 1, []),
224: cell2dmatlab_jsp([1], 1, []),
23: cell2dmatlab_jsp([1], 1, []),
24: cell2dmatlab_jsp([1], 1, []),
25: cell2dmatlab_jsp([1], 1, []),
251: cell2dmatlab_jsp([1], 1, []),
252: cell2dmatlab_jsp([1], 1, []),
253: cell2dmatlab_jsp([1], 1, []),
27: cell2dmatlab_jsp([1], 1, []),
271: cell2dmatlab_jsp([1], 1, []),
28: cell2dmatlab_jsp([1], 1, []),
29: cell2dmatlab_jsp([1], 1, []),
30: cell2dmatlab_jsp([1], 1, []),
31: cell2dmatlab_jsp([1], 1, []),
32: cell2dmatlab_jsp([1], 1, []),
33: cell2dmatlab_jsp([1], 1, []),
34: cell2dmatlab_jsp([1], 1, [])
}
print('### With model: [' + classifier_list[classifier_num][1] + ']')
# Loading model to do the classification
clf, score, FRAP = classifier_list[int(str(classifier_num)[0:2])][0](X, y, X_valid, y_valid, index_no, classifier_num)
clf_cache[classifier_num] = clf
# return clf,score,FRAP
clf_info = cell2dmatlab_jsp([3], 1, [])
clf_info[0] = classifier_num
clf_info[1] = classifier_list[classifier_num][1]
clf_info[2] = clf
return clf_info, score, FRAP
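# Example usage (a minimal sketch added for illustration, not part of the
# original module): assuming the dVM/GVal parameter tables and the raw feature
# matrices have been prepared by the surrounding workflow, a random forest run
# (classifier code 32) would look like:
#
#     clf_info, score, FRAP = sklearnTrainer(32, X_train_raw, Y_train_raw,
#                                            X_valid_raw, Y_valid_raw, path)
#     print('### Used model: [' + clf_info[1] + ']')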
| mit |
aranega/pyecore | experimental/m2m/transfo_example.py | 2 | 2226 | import motra
# generated using
# https://github.com/kolovos/datasets/blob/master/github-mde/ghmde.ecore
# as input metamodel
import ghmde
from pyecore.ecore import *
# Define a graph-like metamodel in a static way
eClass = EPackage('graph', nsURI='http://graph/1.0', nsPrefix='graph')
@EMetaclass
class Node(object):
name = EAttribute(eType=EString)
@EMetaclass
class Graph(object):
name = EAttribute(eType=EString)
nodes = EReference(eType=Node, upper=-1, containment=True)
# Transfo definition
ghmde2graph = motra.Transformation('ghmde2graph',
inputs=['ghmde_model'],
outputs=['graph_model'])
@ghmde2graph.main
def main(ghmde_model=None, graph_model=None):
print('Transforming repository to graph', graph_model)
for repository in motra.objects_of_kind(ghmde_model, ghmde.File):
file2node(repository)
for repository in motra.objects_of_kind(ghmde_model, ghmde.Repository):
repository2graph(repository, postfix='_graph')
def does_not_starts_with(self, postfix):
return not self.name.startswith(postfix)
@ghmde2graph.mapping(when=does_not_starts_with)
def repository2graph(self: ghmde.Repository, postfix: str) -> Graph:
result.name = self.name + postfix
for repo_file in self.files:
result.nodes.append(file2node(repo_file))
@ghmde2graph.mapping
def file2node(self: ghmde.File) -> Node:
result.name = self.path
# @transfo.main
# def main(inputs, outputs):
# print('in main')
# print(inputs.ghmde.contents)
# for o in motra.objects_of_kind(inputs.ghmde, ghmde.Repository):
# test_dispatch(o)
#
#
# @transfo.mapping(when=lambda self: self.name is not None)
# def test1(self: ghmde.Repository) -> ghmde.Repository:
# print('changing name', result is self, self.name)
# result.name = self.name
# self.name += '_toto'
#
#
# @transfo.mapping(output_model='test2',
# when=lambda self: self.name is None)
# def test2(self: ghmde.Repository) -> ghmde.Repository:
# result.name = 'from_empty_' + str(self)
#
#
# @transfo.disjunct(mappings=[test1, test2])
# def test_dispatch(self: ghmde.Repository) -> ghmde.Repository:
# pass
| bsd-3-clause |
vdumoulin/fuel | fuel/converters/cifar100.py | 18 | 3576 | import os
import tarfile
import h5py
import numpy
import six
from six.moves import cPickle
from fuel.converters.base import fill_hdf5_file, check_exists
DISTRIBUTION_FILE = 'cifar-100-python.tar.gz'
@check_exists(required_files=[DISTRIBUTION_FILE])
def convert_cifar100(directory, output_directory,
output_filename='cifar100.hdf5'):
"""Converts the CIFAR-100 dataset to HDF5.
Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
'cifar100.hdf5'.
This method assumes the existence of the following file:
`cifar-100-python.tar.gz`
Parameters
----------
directory : str
Directory in which the required input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar100.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode="w")
input_file = os.path.join(directory, 'cifar-100-python.tar.gz')
tar_file = tarfile.open(input_file, 'r:gz')
file = tar_file.extractfile('cifar-100-python/train')
try:
if six.PY3:
train = cPickle.load(file, encoding='latin1')
else:
train = cPickle.load(file)
finally:
file.close()
train_features = train['data'].reshape(train['data'].shape[0],
3, 32, 32)
train_coarse_labels = numpy.array(train['coarse_labels'],
dtype=numpy.uint8)
train_fine_labels = numpy.array(train['fine_labels'],
dtype=numpy.uint8)
file = tar_file.extractfile('cifar-100-python/test')
try:
if six.PY3:
test = cPickle.load(file, encoding='latin1')
else:
test = cPickle.load(file)
finally:
file.close()
test_features = test['data'].reshape(test['data'].shape[0],
3, 32, 32)
test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)
test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)
data = (('train', 'features', train_features),
('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),
('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),
('test', 'features', test_features),
('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),
('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['coarse_labels'].dims[0].label = 'batch'
h5file['coarse_labels'].dims[1].label = 'index'
h5file['fine_labels'].dims[0].label = 'batch'
h5file['fine_labels'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
def fill_subparser(subparser):
"""Sets up a subparser to convert the CIFAR100 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `cifar100` command.
"""
return convert_cifar100
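# Example usage (a sketch added for illustration; the directory is hypothetical
# and must already contain 'cifar-100-python.tar.gz'):
#
#     output_paths = convert_cifar100('/data/fuel', '/data/fuel')
#     # -> ('/data/fuel/cifar100.hdf5',)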
| mit |
agnusfeec/tattCBIR | lib_sistema.py | 1 | 25313 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 13:36:05 2016
@author: agnus
"""
#%%
def monta_lista_imagens(path = '.', ext='.png'):
import os
imagens = {}
for dirname, dirnames, filenames in os.walk(path):
        # collect every file under `path` whose extension matches `ext`
for filename in filenames:
fname_path = os.path.join(dirname, filename)
fext = os.path.splitext(fname_path)[1]
if fext == ext:
#file_dat = [filename, dirname]
#imagens.append(file_dat)
imagens[filename]=dirname
else:
continue
return imagens
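# Illustrative call (added for clarity; the folder and extension are hypothetical):
#   imagens = monta_lista_imagens('/data/tatt', '.jpg')
# returns a dict mapping each file name to the directory where it was found.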
#%%
def grava_db_imagens(arquivo, imagens):
#arquivo = './tatt_c.db'
with open(arquivo, 'wb') as db_image_file:
for nome_img, caminho in imagens.items():
db_image_file.write(nome_img+ '\t' + caminho + '\n')
db_image_file.close()
#%%
def grava_config(arquivo = './example_mem.cfg'):
import ConfigParser
config = ConfigParser.RawConfigParser()
# When adding sections or items, add them in the reverse order of
# how you want them to be displayed in the actual file.
# In addition, please note that using RawConfigParser's and the raw
# mode of ConfigParser's respective set functions, you can assign
# non-string values to keys internally, but will receive an error
# when attempting to write to a file or when you get it in non-raw
# mode. SafeConfigParser does not allow such assignments to take place.
config.add_section('Geral')
config.set('Geral', 'Image Database', 'Tatt-C')
config.set('Geral', 'Database Image Folder', '/media/sf_Projeto/dataset/tatt_dca/')
config.set('Geral', 'Indexa image database', 'True')
config.set('Geral', 'Database filename', './tatt_c.db')
config.set('Geral', 'Image filename extension','.jpg')
config.set('Geral', 'Training File', 'train1')
config.set('Geral', 'Testing File', 'test1')
config.add_section('Folds')
config.set('Folds', 'Folds Folder', '/media/sf_Projeto/dataset/tatt_dca/folds/')
config.set('Folds', 'Quantidade subsets', '3')
config.set('Folds', 'Subset_1', 'gallery{1}.txt')
config.set('Folds', 'Subset_2', 'probes{1}.txt')
config.set('Folds', 'Subset_3', 'bg{1}.txt')
config.set('Folds', 'Ground_truth', 'ground_truth.txt')
config.add_section('SIFT')
config.set('SIFT','SIFT Folder', '/media/sf_Projeto/dataset/tatt_dca/SIFT/')
# Writing our configuration file to 'example.cfg'
with open(arquivo, 'wb') as configfile:
config.write(configfile)
#%%
def folds_construct(subsets, folds_folder):
n_folds =len(subsets[0])
n_subsets = len(subsets)
folds = []
for i in range(n_folds):
sub = []
for j in range(n_subsets):
arquivo = subsets[j][i]
aux = []
with open(folds_folder+arquivo, 'r') as imagefiles:
for nomef in imagefiles:
if nomef[-1] == '\n' : nomef = nomef[:-1]
aux.append(nomef)
imagefiles.close()
sub.append(aux)
folds.append(sub)
return folds
#%%
def le_config():
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('./example_mem.cfg')
# getfloat() raises an exception if the value is not a float
# getint() and getboolean() also do this for their respective types
base = config.get('Geral', 'image database')
indexa = config.getboolean('Geral', 'indexa image database')
print base
if indexa:
        print "indexing the image database"
arquivo = config.get('Geral','database filename')
caminho = config.get('Geral', 'database image folder')
extensao = config.get('Geral', 'image filename extension')
print arquivo, caminho, extensao
imagens = monta_lista_imagens(caminho, extensao)
grava_db_imagens(arquivo, imagens)
folds_folder = config.get('Folds','folds folder')
n_subsets = config.getint('Folds', 'quantidade subsets')
subsets=[]
for i in range(n_subsets):
sub = config.get('Folds', 'subset_'+str(i+1))
ps = sub.find("{")
pe = sub.find("}")
ped = sub[ps+1:pe]
indices = ped.split(',')
aux = []
for ind in indices:
            aux.append(sub[:ps]+ind+'.txt')  # TODO: make the file extension configurable
subsets.append(aux)
#print subsets
#n_folds = config.getint('Folds', 'quantidade folds')
n_folds =len(subsets[0])
folds = []
for i in range(n_folds):
sub = []
for j in range(n_subsets):
arquivo = subsets[j][i]
aux = []
with open(folds_folder+arquivo, 'r') as imagefiles:
for nomef in imagefiles:
if nomef[-1] == '\n' : nomef = nomef[:-1]
aux.append(nomef)
imagefiles.close()
sub.append(aux)
folds.append(sub)
#print folds[0]
gt_filename = config.get('Folds', 'ground_truth')
sift_folder = config.get('SIFT', 'sift folder')
print sift_folder, folds_folder, caminho
return (folds, imagens, gt_filename, sift_folder, folds_folder, caminho, subsets)
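# Illustrative call (added for clarity): the configuration file drives the rest
# of the pipeline, e.g.
#   folds, imagens, gt_filename, sift_folder, folds_folder, caminho, subsets = le_config()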
#%%
def sift(nomes_imagens, imagens, sift_folder):
import cv2
import os
from math import sqrt
#ds = []
#kp = []
t = len(nomes_imagens)
i=1
for filename in nomes_imagens:
fname = os.path.join(sift_folder, filename[:-3]+'sift_ds')
if os.path.isfile(fname) == False :
print filename
#file_img = os.path.join(diretorio, filename)
diretorio = imagens[filename]
img = cv2.imread(os.path.join(diretorio, filename)) #file_img)
            # Resize the image for the Fisher Vector pipeline (the 256x256
            # resize below is kept commented out)
#img = cv2.resize(img, (256,256))
aux = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(aux)
k = sqrt((240.0*480.0*0.5)/(gray.shape[0]*gray.shape[1]))
res = cv2.resize(gray,None,fx=k, fy=k, interpolation = cv2.INTER_CUBIC)
cv2.imwrite("/media/sf_Projeto/dataset/tatt_dca//img_Reduzido/"+filename,res)
sift = cv2.xfeatures2d.SIFT_create()
(kps, descs) = sift.detectAndCompute(res, None)
#ds.append(descs)
#kp.append(kps)
arquivo = os.path.join(sift_folder, filename[:-3]+'sift_ds')
with open(arquivo, 'wb') as sift_file:
for desc in descs:
sift_file.write(','.join(str(x) for x in desc)+'\n')
sift_file.close()
arquivo = os.path.join(sift_folder, filename[:-3]+'sift_kp')
with open(arquivo, 'wb') as sift_file:
for point in kps:
temp = [point.pt[0], point.pt[1], point.size, point.angle,
point.response, point.octave, point.class_id]
sift_file.write(','.join(str(x) for x in temp)+'\n')
sift_file.close()
print (i*100)/t,
i=i+1
#return ds
#%%
def sift_match(ds1, kp1, ds2, kp2):
import cv2
MIN_MATCH_COUNT = 10
bf = cv2.BFMatcher()
matches = bf.knnMatch(ds1,ds2, k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
qm = len(good)
(nr1,c) = ds1.shape
(nr2,c) = ds2.shape
# if qm>MIN_MATCH_COUNT:
# src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
# dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
#
# M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
# if mask != None:
# matchesMask = mask.ravel().tolist()
# rt = np.sum(np.asarray(matchesMask))
# else:
# rt = 0
# else:
# #print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
# #matchesMask = None
# rt = 0
nr = nr1
if nr2>nr:
nr = nr2
rt = (100.0*qm/nr)
# if qm > 0:
# rt = 1.0/qm
# else:
# rt = 10^8
return rt
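#%%
# Usage sketch added for illustration (not part of the original module). The
# file names are hypothetical and the descriptors/keypoints are assumed to have
# been cached beforehand by sift().
def exemplo_sift_match(sift_folder, file_a='probe.jpg', file_b='gallery.jpg'):
    import os
    import cv2
    import numpy as np

    def carrega(filename):
        fname = os.path.join(sift_folder, filename[:-3] + 'sift_ds')
        ds = (np.loadtxt(open(fname, "r"), delimiter=",")).astype(np.uint8)
        fname = os.path.join(sift_folder, filename[:-3] + 'sift_kp')
        kps = (np.loadtxt(open(fname, "r"), delimiter=",")).astype(np.float)
        kp = [cv2.KeyPoint(float(p[0]), float(p[1]), float(p[2]), float(p[3]),
                           float(p[4]), int(p[5]), int(p[6])) for p in kps]
        return ds, kp

    ds1, kp1 = carrega(file_a)
    ds2, kp2 = carrega(file_b)
    # sift_match returns the percentage of good matches relative to the larger
    # descriptor set (see above).
    return sift_match(ds1, np.asarray(kp1), ds2, np.asarray(kp2))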
#%%
def gera_sift_base(folds, imagens, sift_folder):
    # First generate, when not already cached, the SIFT descriptors for the
    # training and test images. This could be optimized by generating them for
    # the whole database, but that may be expensive because the database also
    # contains images belonging to other use cases.
n_folds = len(folds)
    # This could be implemented differently: the lines below only aggregate the
    # image names so that the SIFT descriptors are generated for each fold.
for i in range(n_folds):
test = folds[i][1]
train = folds[i][0]
bg = folds[i][2]
for j in range(n_folds):
if j!=i :
train = train + folds[j][0]+folds[j][1]+folds[j][2]
        print 'Generating SIFT for the training set'
#train_kp, train_ds = sift(train, imagens, sift_folder)
sift(train, imagens, sift_folder)
        print 'Generating SIFT for the test set'
#test_kp, test_ds = sift(test, imagens)
sift(test, imagens, sift_folder)
        print 'Generating SIFT for the background set'
#bg_kp, bg_ds = sift(bg, imagens)
sift(bg, imagens, sift_folder)
#%%
def processa_sift(folds, imagens, sift_folder):
import numpy as np
import os
import cv2
n_folds = len(folds)
    # Changed to include the gallery images of fold i in the train set, so that
    # the images corresponding to each probe are present in the gallery (train).
for i in range(n_folds):
test = folds[i][1]
bg = folds[i][2]
train = folds[i][0]#+bg
for j in range(n_folds):
if j!=i :
train = train + folds[j][0]+folds[j][1]+folds[j][2]
n_test = len(test)
n_train = len(train)
dist = np.zeros((n_train), dtype=np.float)
nn = n_test * n_train
        print 'Matching the training set against the test set'
mem = True
if mem==True :
ds=[]
ks=[]
arquivo = './clist_mem_'+str(i+1)+'.txt'
with open(arquivo, 'w') as clist_file:
l = 0
for file_test in test:
fname = os.path.join(sift_folder, file_test[:-3]+'sift_ds')
ds1 = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) #,skiprows=1)
fname = os.path.join(sift_folder, file_test[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
kp1=[]
kp2=[]
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
kp1.append(kpoint)
diretorio = imagens[file_test]
img1 = cv2.imread(os.path.join(diretorio, file_test),0)
#print os.path.join(diretorio, file_test)
j = 0
for file_train in train:
diretorio = imagens[file_train]
img2 = cv2.imread(os.path.join(diretorio, file_train),0)
#print os.path.join(diretorio, file_train)
if (mem == True and len(ds)<len(train)):
fname = os.path.join(sift_folder, file_train[:-3]+'sift_ds')
ds.append ( np.asarray((np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8)) ) #,skiprows=1)
ds2 = ds[j]
fname = os.path.join(sift_folder, file_train[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
aux =[]
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
aux.append(kpoint)
ks.append(aux)
kp2 = ks[j]
elif (mem == True and len(ds)==len(train)):
ds2 = ds[j]
kp2 = ks[j]
elif mem == False:
fname = os.path.join(sift_folder, file_train[:-3]+'sift_ds')
ds2 = ( (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) )
fname = os.path.join(sift_folder, file_train[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
kp2 = []
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
kp2.append(kpoint)
#print ds1
#print ds2
rt = sift_match(ds1, np.asarray(kp1), ds2, np.asarray(kp2))
dist[j] = rt
j = j + 1
print i,(((l*n_train)+j)*100)/nn,
indice = np.argsort(dist)[::-1]
k = 1
for id in indice:
clist_file.write(file_test+'|'+ str(k) + '|' + train[id] + '|' + str(dist[id]) +'\n')
k = k + 1
l = l + 1
clist_file.close()
        break  # note: only the first fold is processed here
#%%
def ground_truth(folds_folder, gt_filename):
"""Reads a ground truth table from text file.
Keyword arguments:
folds_folder -- the path for the ground truth file
gt_filename -- the file name of the ground truth file with extension
Returns:
gt_images -- ground truth table stored in a dictionary
"""
#folds_folder = '/media/sf_Projeto/dataset/tatt-c_update_v1.4/5-fold/tattoo_identification/'
#gt_filename = 'ground_truth.txt'
gt_imagens = {}
with open(folds_folder+gt_filename, 'r') as gt_arq:
for nomef in gt_arq:
imgs = nomef.split('|')
if imgs[1][-1] == '\n' : imgs[1] = imgs[1][:-1]
#print imgs[0], imgs[1]
gt_imagens[imgs[0]] = imgs[1]
gt_arq.close()
return gt_imagens
#%%
def compute_cmc(arquivo, gt_imagens):
"""Reads a classification list from text file and sumarize rank results for
every image reference based in the ground truth dictionary.
Keyword arguments:
arquivo -- the filename of classification list file
gt_images -- ground truth table stored in a dictionary
Returns:
cmc -- acummulated accuracy for each rank stored in a numpy array
"""
import numpy as np
i = 0
acc = np.zeros(400)
#arquivo = './clist_mem_'+str(i+1)+'.txt'
with open(arquivo, 'r') as clist_file:
for nomef in clist_file:
imgs = nomef.split('|')
if imgs[3][-1] == '\n' : imgs[3] = imgs[3][:-1]
if gt_imagens[imgs[0]] == imgs[2] :
r = int(imgs[1])
acc[r] = acc[r]+1
clist_file.close()
#print cmc
ft = sum(acc)
#print cmc/ft
cmc = np.zeros(400)
for i in range(1,400):
cmc[i] = cmc[i-1]+acc[i]/ft
#print cmc1
return cmc
#%%
def plot_cmc(cmc, ni=200):
import matplotlib.pyplot as plt
import pylab as P
import numpy as np
fig = P.figure()
fig.suptitle('Acumulative Match Characteristic', fontsize=18, fontweight='bold')
P.ylabel('%', fontsize=16)
P.xlabel('Rank', fontsize=16)
P.xlim(0, ni)
P.ylim(0,101)
P.xticks(np.arange(0, ni, 10.0))
P.yticks(np.arange(0, 101, 5.0))
xticklabels = P.getp(P.gca(), 'xticklabels')
yticklabels = P.getp(P.gca(), 'yticklabels')
P.setp(yticklabels, 'color', 'k', fontsize='x-large')
P.setp(xticklabels, 'color', 'k', fontsize='x-large')
P.grid(True)
fig.set_size_inches(19,7)
#P.plot(cmc*100)
P.plot(cmc*100)
fig.savefig('cmc_bf_knn.png')
P.show()
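#%%
# Usage sketch added for illustration (not part of the original module): ties
# together ground_truth(), compute_cmc() and plot_cmc() for one fold. The
# classification-list path is hypothetical (such files are written by
# processa_sift()).
def exemplo_avaliacao(folds_folder, gt_filename, clist_arquivo='./clist_mem_1.txt'):
    gt_imagens = ground_truth(folds_folder, gt_filename)
    cmc = compute_cmc(clist_arquivo, gt_imagens)
    plot_cmc(cmc, ni=200)
    return cmc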
#%%%
#Author: Jacob Gildenblat, 2014
#http://jacobcv.blogspot.com.br/2014/12/fisher-vector-in-python.html
#License: you may use this for whatever you like
#Adaptation: Agnus A. Horta
def fv_dictionary(descriptors, N):
import numpy as np
import cv2
em = cv2.ml.EM_create()
em.setClustersNumber(N)
#em = cv2.EM(N)
em.trainEM(descriptors)
return np.float32(em.getMeans()), \
np.float32(em.getCovs()), np.float32(em.getWeights())[0]
def fv_generate_gmm(descriptors, N, dt):
import numpy as np
words = np.concatenate(descriptors)
#np.concatenate([folder_descriptors(folder) for folder in glob.glob(input_folder + '*')])
#print("Training GMM of size", N)
means, covs, weights = fv_dictionary(words, N)
#Throw away gaussians with weights that are too small:
th = 1.0 / N
means = np.float32([m for k,m in zip(range(0, len(weights)), means) if weights[k] > th])
covs = np.float32([m for k,m in zip(range(0, len(weights)), covs) if weights[k] > th])
weights = np.float32([m for k,m in zip(range(0, len(weights)), weights) if weights[k] > th])
#print 'Means: ',means
#print 'Covs: ',covs
#print 'Weights: ',weights
np.save("./dat/means" + dt + ".gmm", means)
np.save("./dat/covs" + dt + ".gmm", covs)
np.save("./dat/weights" + dt + ".gmm", weights)
return means, covs, weights
def fv_load_gmm(dt, folder = "./dat"):
import numpy as np
files = ["means" + dt + ".gmm" +".npy", "covs" + dt + ".gmm.npy", "weights" + dt + ".gmm.npy"]
try:
return map(lambda file: np.load(file), map(lambda s : folder + "/" + s , files))
except IOError:
return (None, None, None)
def fv_likelihood_moment(x, ytk, moment):
import numpy as np
x_moment = np.power(np.float32(x), moment) if moment > 0 else np.float32([1])
return x_moment * ytk
def fv_likelihood_statistics(samples, means, covs, weights):
from scipy.stats import multivariate_normal
import numpy as np
gaussians, s0, s1,s2 = {}, {}, {}, {}
samples = zip(range(0, len(samples)), samples)
#print samples
g = [multivariate_normal(mean=means[k], cov=covs[k]) for k in range(0, len(weights)) ]
for index, x in samples:
gaussians[index] = np.array([g_k.pdf(x) for g_k in g])
for k in range(0, len(weights)):
s0[k], s1[k], s2[k] = 0, 0, 0
for index, x in samples:
probabilities = np.multiply(gaussians[index], weights)
probabilities = probabilities / np.sum(probabilities)
s0[k] = s0[k] + fv_likelihood_moment(x, probabilities[k], 0)
s1[k] = s1[k] + fv_likelihood_moment(x, probabilities[k], 1)
s2[k] = s2[k] + fv_likelihood_moment(x, probabilities[k], 2)
return s0, s1, s2
def fv_fisher_vector_weights(s0, s1, s2, means, covs, w, T):
import numpy as np
return np.float32([((s0[k] - T * w[k]) / np.sqrt(w[k]) ) for k in range(0, len(w))])
def fv_fisher_vector_means(s0, s1, s2, means, sigma, w, T):
import numpy as np
return np.float32([(s1[k] - means[k] * s0[k]) / (np.sqrt(w[k] * sigma[k])) for k in range(0, len(w))])
def fv_fisher_vector_sigma(s0, s1, s2, means, sigma, w, T):
import numpy as np
return np.float32([(s2[k] - 2 * means[k]*s1[k] + (means[k]*means[k] - sigma[k]) * s0[k]) / (np.sqrt(2*w[k])*sigma[k]) for k in range(0, len(w))])
def fv_normalize(fisher_vector):
import numpy as np
v = np.sqrt(abs(fisher_vector)) * np.sign(fisher_vector)
return v / np.sqrt(np.dot(v, v))
def fv_fisher_vector(samples, means, covs, w):
import numpy as np
#print 'fisher_vector(samples, means, covs, w)'
s0, s1, s2 = fv_likelihood_statistics(samples, means, covs, w)
T = samples.shape[0]
covs = np.float32([np.diagonal(covs[k]) for k in range(0, covs.shape[0])])
a = fv_fisher_vector_weights(s0, s1, s2, means, covs, w, T)
b = fv_fisher_vector_means(s0, s1, s2, means, covs, w, T)
c = fv_fisher_vector_sigma(s0, s1, s2, means, covs, w, T)
fv = np.concatenate([np.concatenate(a), np.concatenate(b), np.concatenate(c)])
fv = fv_normalize(fv)
#print 'fv = ', fv
return fv
def le_descritores(sift_folder, subset, tipo=1):
import os
import numpy as np
#n_folds = len(folds)
    # Changed to include the gallery images of fold i in the train set, so that
    # the images corresponding to each probe are present in the gallery (train).
# for i in range(n_folds):
# train = folds[i][0]
# for j in range(n_folds):
# if j!=i :
# train = train + folds[j][0]+folds[j][1]+folds[j][2]
#
# n_train = len(train)
ch = 0
ds = []
id_ds = []
for image in subset:
fname = os.path.join(sift_folder, image[:-3]+'sift_ds')
ds1 = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) #,skiprows=1)
if tipo == 1:
if ch == 0:
ch = 1
ds = []
ds.append(ds1)
id_ds.append(ds1.shape[0])
else:
ds.append(ds1)
id_ds.append(ds1.shape[0])
else:
if ch == 0:
ch = 1
ds = np.empty_like(ds1)
ds[:] = ds1
id_ds.append(ds1.shape[0])
else:
print ds.shape, ds1.shape
ds = np.concatenate((ds, ds1), axis=0)
id_ds.append(ds1.shape[0])
return ds, id_ds
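#%%
# Usage sketch added for illustration (not part of the original module): fits a
# small GMM on the cached SIFT descriptors of `subset` and encodes each image
# as a Fisher vector. It assumes the ./dat folder used by fv_generate_gmm
# exists; N and the `dt` suffix are arbitrary example values.
def exemplo_fisher_vector(sift_folder, subset, N=16, dt='_exemplo'):
    import numpy as np
    ds, id_ds = le_descritores(sift_folder, subset, tipo=1)
    # the EM training expects floating-point samples, so cast the uint8 SIFT
    # descriptors before fitting the GMM
    ds = [d.astype(np.float32) for d in ds]
    means, covs, weights = fv_generate_gmm(ds, N, dt)
    fvs = [fv_fisher_vector(d, means, covs, weights) for d in ds]
    return fvs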
#%%
def bov_histogramas_grava(arquivo, hists, dt):
resultFile = open(arquivo, 'w')
i = len(hists)
for h in hists:
line = (''.join(str(e) + ", " for e in h.tolist()))[:-2]
resultFile.write(line)
if i > 0:
resultFile.write("\n")
i = i - 1
resultFile.close()
#%%
def bov_codebook_gera(l_sift, nc, tipo):
if tipo == 1:
# http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans.fit
from sklearn.cluster import KMeans
est = KMeans(n_clusters=nc, init='k-means++', n_init=10, max_iter=100,
tol=0.0001, precompute_distances='auto', verbose=0,
random_state=None, copy_x=True, n_jobs=4)
est.fit(l_sift)
labels = est.labels_
centers = est.cluster_centers_
elif tipo == 2:
from sklearn.cluster import MiniBatchKMeans
est = MiniBatchKMeans(n_clusters=nc, init='k-means++', max_iter=100,
batch_size=3*nc, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01)
est.fit(l_sift)
labels = est.labels_
centers = est.cluster_centers_
else:
import random
from scipy.cluster.vq import vq
import numpy as np
list_of_random_items = random.sample(np.arange(l_sift.shape[0]), nc)
l_centroids = []
for i in list_of_random_items:
l_centroids.append(l_sift[i])
centers = np.asarray(l_centroids)
labels, _ = vq(l_sift, centers)
return (centers, labels)
#%%
def bov_histogramas_gera(labels, id_ds, k, nomes_imagens, vis=False):
from matplotlib import pyplot as plt
import numpy as np
#fv = np.vectorize(f)
hists = []
i = 0
for j in range(len(nomes_imagens)):
#ld = X[indices[j]].tolist()
n = id_ds[j]
sl = labels[i:i+n]
hist, bins = np.histogram(sl, bins=k, range=(0, k), normed=False,
weights=None, density=True)
if vis == True:
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.title("Histogram "+nomes_imagens[j])
plt.xlabel("Visual Word")
plt.ylabel("Frequency")
plt.bar(center, hist, align='center', width=width)
plt.show()
#print j
hists.append(hist)
#print hist
i = i + n
#j = j +1
return hists
def bov_descritores_codifica(X, centers):
from scipy.cluster.vq import vq
labels,_ = vq(X,centers)
return labels
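#%%
# Usage sketch added for illustration (not part of the original module): a full
# bag-of-visual-words run over `subset` using the helpers above (tipo=2 selects
# MiniBatchKMeans for the codebook). The output file name is hypothetical.
def exemplo_bov(sift_folder, subset, k=256):
    ds, id_ds = le_descritores(sift_folder, subset, tipo=2)
    centers, _ = bov_codebook_gera(ds, k, 2)
    labels = bov_descritores_codifica(ds, centers)
    hists = bov_histogramas_gera(labels, id_ds, k, subset, vis=False)
    bov_histogramas_grava('./bov_hists.txt', hists, '')
    return hists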
| gpl-3.0 |
aalmah/pylearn2 | pylearn2/devtools/tests/test_format.py | 24 | 25785 | """
Unit tests for format checking
"""
from __future__ import print_function
from nose.plugins.skip import SkipTest
import os
import pylearn2
from pylearn2.devtools.tests.docscrape import docstring_errors
from pylearn2.devtools.list_files import list_files
from pylearn2.devtools.tests.pep8.pep8 import StyleGuide
whitelist_pep8 = [
"rbm_tools.py",
"distributions/mnd.py",
"models/sparse_autoencoder.py",
"models/tests/test_dbm.py",
"models/tests/test_s3c_inference.py",
"models/tests/test_mnd.py",
"models/tests/test_s3c_misc.py",
"models/gsn.py",
"models/dbm/layer.py",
"models/dbm/__init__.py",
"models/dbm/ising.py",
"models/differentiable_sparse_coding.py",
"models/local_coordinate_coding.py",
"models/mnd.py",
"models/s3c.py",
"tests/test_monitor.py",
"kmeans.py",
"packaged_dependencies/theano_linear/conv2d.py",
"packaged_dependencies/theano_linear/imaging.py",
"packaged_dependencies/theano_linear/pyramid.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"test_gpu_unshared_conv.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"test_localdot.py",
"packaged_dependencies/theano_linear/unshared_conv/localdot.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"unshared_conv.py",
"packaged_dependencies/theano_linear/linear.py",
"packaged_dependencies/theano_linear/test_spconv.py",
"packaged_dependencies/theano_linear/test_matrixmul.py",
"packaged_dependencies/theano_linear/spconv.py",
"expr/tests/test_coding.py",
"expr/tests/test_normalize.py",
"expr/tests/test_stochastic_pool.py",
"expr/stochastic_pool.py",
"expr/sampling.py",
"expr/information_theory.py",
"expr/basic.py",
"gui/graph_2D.py",
"sandbox/cuda_convnet/weight_acts.py",
"sandbox/cuda_convnet/filter_acts.py",
"sandbox/cuda_convnet/tests/test_filter_acts_strided.py",
"sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py",
"sandbox/cuda_convnet/tests/test_filter_acts.py",
"sandbox/cuda_convnet/tests/test_weight_acts_strided.py",
"sandbox/cuda_convnet/tests/test_image_acts_strided.py",
"sandbox/cuda_convnet/tests/test_img_acts.py",
"sandbox/cuda_convnet/tests/test_stochastic_pool.py",
"sandbox/cuda_convnet/specialized_bench.py",
"sandbox/cuda_convnet/response_norm.py",
"sandbox/cuda_convnet/__init__.py",
"sandbox/cuda_convnet/img_acts.py",
"sandbox/cuda_convnet/convnet_compile.py",
"sandbox/cuda_convnet/pthreads.py",
"sandbox/cuda_convnet/pool.py",
"sandbox/cuda_convnet/bench.py",
"sandbox/cuda_convnet/stochastic_pool.py",
"sandbox/cuda_convnet/probabilistic_max_pooling.py",
"sandbox/tuple_var.py",
"sandbox/lisa_rl/bandit/average_agent.py",
"sandbox/lisa_rl/bandit/classifier_bandit.py",
"sandbox/lisa_rl/bandit/classifier_agent.py",
"sandbox/lisa_rl/bandit/plot_reward.py",
"config/old_config.py",
"utils/utlc.py",
"utils/tests/test_serial.py",
"utils/common_strings.py",
"utils/mem.py",
"dataset_get/dataset-get.py",
"dataset_get/helper-scripts/make-archive.py",
"dataset_get/dataset_resolver.py",
"optimization/minres.py",
"linear/conv2d.py",
"linear/local_c01b.py",
"linear/linear_transform.py",
"linear/conv2d_c01b.py",
"energy_functions/rbm_energy.py",
"scripts/pkl_inspector.py",
"scripts/show_binocular_greyscale_examples.py",
"scripts/jobman/tester.py",
"scripts/dbm/dbm_metrics.py",
"scripts/papers/maxout/svhn_preprocessing.py",
"scripts/papers/jia_huang_wkshp_11/fit_final_model.py",
"scripts/papers/jia_huang_wkshp_11/evaluate.py",
"scripts/papers/jia_huang_wkshp_11/extract_features.py",
"scripts/papers/jia_huang_wkshp_11/assemble.py",
"scripts/gpu_pkl_to_cpu_pkl.py",
"scripts/gsn_example.py",
"scripts/tutorials/deep_trainer/run_deep_trainer.py",
"scripts/tutorials/grbm_smd/test_grbm_smd.py",
"scripts/icml_2013_wrepl/multimodal/"
"extract_layer_2_kmeans_features.py",
"scripts/icml_2013_wrepl/multimodal/make_submission.py",
"scripts/icml_2013_wrepl/multimodal/lcn.py",
"scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py",
"scripts/icml_2013_wrepl/emotions/emotions_dataset.py",
"scripts/icml_2013_wrepl/emotions/make_submission.py",
"scripts/icml_2013_wrepl/black_box/black_box_dataset.py",
"scripts/icml_2013_wrepl/black_box/make_submission.py",
"scripts/diff_monitor.py",
"corruption.py",
"sandbox/lisa_rl/bandit/gaussian_bandit.py",
"utils/track_version.py",
"scripts/get_version.py",
"training_algorithms/tests/test_bgd.py",
"training_algorithms/tests/test_default.py",
"training_algorithms/default.py",
"training_algorithms/training_algorithm.py",
"distributions/tests/test_mnd.py",
"distributions/parzen.py",
"distributions/uniform_hypersphere.py",
"models/setup.py",
"models/independent_multiclass_logistic.py",
"models/softmax_regression.py",
"models/tests/test_reflection_clip.py",
"models/tests/test_maxout.py",
"models/tests/test_convelemwise_sigm.py",
"models/dbm/sampling_procedure.py",
"models/rbm.py",
"models/pca.py",
"tests/test_train.py",
"packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py",
"packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py",
"packaged_dependencies/theano_linear/linearmixin.py",
"packaged_dependencies/theano_linear/util.py",
"packaged_dependencies/theano_linear/__init__.py",
"packaged_dependencies/theano_linear/test_linear.py",
"expr/tests/test_nnet.py",
"expr/image.py",
"expr/coding.py",
"expr/normalize.py",
"expr/probabilistic_max_pooling.py",
"testing/tests/test.py",
"testing/skip.py",
"testing/prereqs.py",
"testing/__init__.py",
"gui/get_weights_report.py",
"gui/patch_viewer.py",
"sandbox/cuda_convnet/tests/test_response_norm.py",
"sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py",
"sandbox/cuda_convnet/tests/test_rop_pool.py",
"sandbox/cuda_convnet/tests/test_pool.py",
"sandbox/cuda_convnet/tests/test_common.py",
"sandbox/cuda_convnet/shared_code.py",
"sandbox/cuda_convnet/code_templates.py",
"sandbox/lisa_rl/bandit/agent.py",
"sandbox/lisa_rl/bandit/algorithm.py",
"sandbox/lisa_rl/bandit/environment.py",
"sandbox/lisa_rl/__init__.py",
"datasets/avicenna.py",
"datasets/iris.py",
"datasets/adult.py",
"datasets/npy_npz.py",
"datasets/control.py",
"datasets/cifar100.py",
"datasets/transformer_dataset.py",
"termination_criteria/__init__.py",
"__init__.py",
"utils/logger.py",
"utils/tests/test_mnist_ubyte.py",
"utils/tests/test_data_specs.py",
"utils/tests/test_bit_strings.py",
"utils/tests/test_iteration.py",
"utils/theano_graph.py",
"utils/__init__.py",
"utils/datasets.py",
"utils/data_specs.py",
"utils/insert_along_axis.py",
"utils/environ.py",
"utils/call_check.py",
"utils/python26.py",
"deprecated/classifier.py",
"train.py",
"classifier.py",
"dataset_get/helper-scripts/make-sources.py",
"pca.py",
"optimization/test_linesearch.py",
"optimization/test_minres.py",
"optimization/test_batch_gradient_descent.py",
"optimization/linear_cg.py",
"optimization/test_feature_sign.py",
"optimization/feature_sign.py",
"optimization/test_linear_cg.py",
"optimization/linesearch.py",
"linear/tests/test_conv2d.py",
"linear/tests/test_conv2d_c01b.py",
"linear/matrixmul.py",
"energy_functions/energy_function.py",
"scripts/make_weights_image.py",
"scripts/plot_monitor.py",
"scripts/print_monitor.py",
"scripts/num_parameters.py",
"scripts/benchmark/time_relu.py",
"scripts/jobman/experiment.py",
"scripts/jobman/__init__.py",
"scripts/dbm/show_negative_chains.py",
"scripts/papers/maxout/compute_test_err.py",
"scripts/papers/jia_huang_wkshp_11/npy2mat.py",
"scripts/datasets/step_through_small_norb.py",
"scripts/datasets/step_through_norb_foveated.py",
"scripts/datasets/make_downsampled_stl10.py",
"scripts/datasets/browse_small_norb.py",
"scripts/datasets/make_mnistplus.py",
"scripts/mlp/predict_csv.py",
"scripts/find_gpu_fields.py",
"scripts/tutorials/deep_trainer/test_deep_trainer.py",
"scripts/icml_2013_wrepl/multimodal/make_wordlist.py",
"base.py",
"devtools/tests/test_via_pyflakes.py",
"devtools/tests/test_shebangs.py",
"devtools/tests/pep8/pep8.py",
"devtools/tests/docscrape.py",
"devtools/run_pyflakes.py",
"devtools/record.py",
"train_extensions/tests/test_window_flip.py",
"train_extensions/__init__.py",
]
whitelist_docstrings = [
'scripts/datasets/step_through_norb_foveated.py',
'blocks.py',
'datasets/hdf5.py',
'rbm_tools.py',
'training_algorithms/tests/test_bgd.py',
'training_algorithms/tests/test_sgd.py',
'training_algorithms/tests/test_default.py',
'training_algorithms/bgd.py',
'training_algorithms/default.py',
'training_algorithms/training_algorithm.py',
'training_algorithms/__init__.py',
'training_algorithms/sgd.py',
'distributions/tests/test_mnd.py',
'distributions/multinomial.py',
'distributions/parzen.py',
'distributions/__init__.py',
'distributions/mnd.py',
'distributions/uniform_hypersphere.py',
'models/setup.py',
'models/independent_multiclass_logistic.py',
'models/softmax_regression.py',
'models/sparse_autoencoder.py',
'models/tests/test_reflection_clip.py',
'models/tests/test_dbm.py',
'models/tests/test_gsn.py',
'models/tests/test_dropout.py',
'models/tests/test_autoencoder.py',
'models/tests/test_mlp.py',
'models/tests/test_s3c_inference.py',
'models/tests/test_maxout.py',
'models/tests/test_mnd.py',
'models/tests/test_vae.py',
'models/tests/test_rbm.py',
'models/tests/test_s3c_misc.py',
'models/gsn.py',
'models/dbm/sampling_procedure.py',
'models/dbm/layer.py',
'models/dbm/__init__.py',
'models/dbm/dbm.py',
'models/dbm/ising.py',
'models/differentiable_sparse_coding.py',
'models/local_coordinate_coding.py',
'models/maxout.py',
'models/s3c.py',
'models/mnd.py',
'models/rbm.py',
'models/autoencoder.py',
'tests/test_dbm_metrics.py',
'tests/test_monitor.py',
'tests/test_train.py',
'tests/rbm/test_ais.py',
'kmeans.py',
'packaged_dependencies/__init__.py',
'packaged_dependencies/theano_linear/imaging.py',
'packaged_dependencies/theano_linear/unshared_conv/__init__.py',
'packaged_dependencies/theano_linear/unshared_conv/unshared_conv.py',
'packaged_dependencies/theano_linear/linearmixin.py',
'packaged_dependencies/theano_linear/linear.py',
'packaged_dependencies/theano_linear/test_spconv.py',
'expr/activations.py',
'expr/tests/test_probabilistic_max_pooling.py',
'expr/tests/test_preprocessing.py',
'expr/tests/test_nnet.py',
'expr/tests/test_coding.py',
'expr/tests/test_normalize.py',
'expr/tests/test_stochastic_pool.py',
'expr/preprocessing.py',
'expr/image.py',
'expr/coding.py',
'expr/__init__.py',
'expr/stochastic_pool.py',
'expr/sampling.py',
'expr/normalize.py',
'expr/probabilistic_max_pooling.py',
'expr/information_theory.py',
'expr/basic.py',
'testing/tests/test.py',
'testing/skip.py',
'testing/prereqs.py',
'testing/__init__.py',
'testing/datasets.py',
'gui/get_weights_report.py',
'gui/__init__.py',
'gui/patch_viewer.py',
'scalar.py',
'sandbox/cuda_convnet/weight_acts.py',
'sandbox/cuda_convnet/filter_acts.py',
'sandbox/cuda_convnet/tests/test_filter_acts_strided.py',
'sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py',
'sandbox/cuda_convnet/tests/test_filter_acts.py',
'sandbox/cuda_convnet/tests/test_img_acts.py',
'sandbox/cuda_convnet/tests/test_response_norm.py',
'sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py',
'sandbox/cuda_convnet/tests/test_weight_acts.py',
'sandbox/cuda_convnet/tests/test_rop_pool.py',
'sandbox/cuda_convnet/tests/test_pool.py',
'sandbox/cuda_convnet/tests/test_common.py',
'sandbox/cuda_convnet/tests/test_stochastic_pool.py',
'sandbox/cuda_convnet/shared_code.py',
'sandbox/cuda_convnet/__init__.py',
'sandbox/cuda_convnet/img_acts.py',
'sandbox/cuda_convnet/base_acts.py',
'sandbox/cuda_convnet/pool.py',
'sandbox/cuda_convnet/stochastic_pool.py',
'sandbox/cuda_convnet/code_templates.py',
'sandbox/cuda_convnet/probabilistic_max_pooling.py',
'sandbox/tuple_var.py',
'sandbox/__init__.py',
'sandbox/lisa_rl/bandit/simulator.py',
'sandbox/lisa_rl/bandit/agent.py',
'sandbox/lisa_rl/bandit/algorithm.py',
'sandbox/lisa_rl/bandit/environment.py',
'sandbox/lisa_rl/bandit/average_agent.py',
'sandbox/lisa_rl/bandit/classifier_bandit.py',
'sandbox/lisa_rl/bandit/__init__.py',
'sandbox/lisa_rl/bandit/classifier_agent.py',
'sandbox/lisa_rl/bandit/gaussian_bandit.py',
'sandbox/lisa_rl/__init__.py',
'config/old_config.py',
'config/tests/test_yaml_parse.py',
'config/yaml_parse.py',
'space/tests/test_space.py',
'space/__init__.py',
'datasets/norb.py',
'datasets/utlc.py',
'datasets/mnistplus.py',
'datasets/cos_dataset.py',
'datasets/cifar10.py',
'datasets/svhn.py',
'datasets/tests/test_preprocessing.py',
'datasets/tests/test_mnist.py',
'datasets/tests/test_imports.py',
'datasets/tests/test_cifar10.py',
'datasets/tests/test_norb.py',
'datasets/tests/test_dense_design_matrix.py',
'datasets/tests/test_vector_spaces_dataset.py',
'datasets/tests/test_four_regions.py',
'datasets/tests/test_csv_dataset.py',
'datasets/tests/test_icml07.py',
'datasets/tests/test_utlc.py',
'datasets/preprocessing.py',
'datasets/avicenna.py',
'datasets/iris.py',
'datasets/config.py',
'datasets/dense_design_matrix.py',
'datasets/adult.py',
'datasets/tfd.py',
'datasets/icml07.py',
'datasets/filetensor.py',
'datasets/npy_npz.py',
'datasets/hepatitis.py',
'datasets/wiskott.py',
'datasets/control.py',
'datasets/exc.py',
'datasets/__init__.py',
'datasets/mnist.py',
'datasets/sparse_dataset.py',
'datasets/csv_dataset.py',
'datasets/cifar100.py',
'datasets/tl_challenge.py',
'datasets/transformer_dataset.py',
'datasets/norb_small.py',
'datasets/retina.py',
'datasets/ocr.py',
'datasets/stl10.py',
'datasets/matlab_dataset.py',
'datasets/vector_spaces_dataset.py',
'datasets/four_regions.py',
'datasets/debug.py',
'datasets/binarizer.py',
'termination_criteria/__init__.py',
'__init__.py',
'utils/utlc.py',
'utils/setup.py',
'utils/compile.py',
'utils/logger.py',
'utils/general.py',
'utils/testing.py',
'utils/tests/test_mnist_ubyte.py',
'utils/tests/test_data_specs.py',
'utils/tests/test_video.py',
'utils/tests/test_bit_strings.py',
'utils/tests/test_rng.py',
'utils/tests/test_pooling.py',
'utils/tests/test_iteration.py',
'utils/tests/test_insert_along_axis.py',
'utils/tests/test_utlc.py',
'utils/tests/test_compile.py',
'utils/tests/test_key_aware.py',
'utils/key_aware.py',
'utils/video.py',
'utils/bit_strings.py',
'utils/iteration.py',
'utils/pooling.py',
'utils/theano_graph.py',
'utils/common_strings.py',
'utils/datasets.py',
'utils/data_specs.py',
'utils/shell.py',
'utils/rng.py',
'utils/insert_along_axis.py',
'utils/environ.py',
'utils/call_check.py',
'utils/mnist_ubyte.py',
'utils/track_version.py',
'utils/mem.py',
'utils/python26.py',
'utils/timing.py',
'deprecated/__init__.py',
'deprecated/classifier.py',
'train.py',
'format/tests/test_target_format.py',
'format/__init__.py',
'dataset_get/dataset-get.py',
'dataset_get/helper-scripts/make-sources.py',
'dataset_get/helper-scripts/make-archive.py',
'dataset_get/dataset_resolver.py',
'pca.py',
'monitor.py',
'optimization/batch_gradient_descent.py',
'optimization/__init__.py',
'optimization/test_batch_gradient_descent.py',
'optimization/linear_cg.py',
'optimization/minres.py',
'optimization/test_feature_sign.py',
'optimization/feature_sign.py',
'optimization/linesearch.py',
'linear/conv2d.py',
'linear/tests/test_matrixmul.py',
'linear/local_c01b.py',
'linear/matrixmul.py',
'linear/__init__.py',
'linear/linear_transform.py',
'linear/conv2d_c01b.py',
'energy_functions/tests/__init__.py',
'energy_functions/rbm_energy.py',
'energy_functions/__init__.py',
'energy_functions/energy_function.py',
'scripts/plot_monitor.py',
'scripts/print_model.py',
'scripts/tests/__init__.py',
'scripts/pkl_inspector.py',
'scripts/get_version.py',
'scripts/print_monitor.py',
'scripts/show_binocular_greyscale_examples.py',
'scripts/num_parameters.py',
'scripts/jobman/tester.py',
'scripts/jobman/experiment.py',
'scripts/jobman/__init__.py',
'scripts/dbm/__init__.py',
'scripts/dbm/dbm_metrics.py',
'scripts/papers/__init__.py',
'scripts/papers/jia_huang_wkshp_11/extract_features.py',
'scripts/print_channel_doc.py',
'scripts/gpu_pkl_to_cpu_pkl.py',
'scripts/datasets/step_through_small_norb.py',
'scripts/datasets/download_mnist.py',
'scripts/datasets/download_binarized_mnist.py',
'scripts/datasets/browse_small_norb.py',
'scripts/datasets/make_mnistplus.py',
'scripts/__init__.py',
'scripts/gsn_example.py',
'scripts/mlp/predict_csv.py',
'scripts/mlp/__init__.py',
'scripts/find_gpu_fields.py',
'scripts/tutorials/dbm_demo/train_dbm.py',
'scripts/tutorials/dbm_demo/__init__.py',
'scripts/tutorials/tests/test_dbm.py',
'scripts/tutorials/tests/test_mlp_nested.py',
'scripts/tutorials/multilayer_perceptron/tests/test_mlp.py',
'scripts/tutorials/softmax_regression/tests/test_softmaxreg.py',
'scripts/tutorials/deep_trainer/__init__.py',
'scripts/tutorials/deep_trainer/run_deep_trainer.py',
'scripts/tutorials/grbm_smd/make_dataset.py',
'scripts/tutorials/grbm_smd/__init__.py',
'scripts/tutorials/grbm_smd/test_grbm_smd.py',
'scripts/tutorials/__init__.py',
'scripts/tutorials/jobman_demo/utils.py',
'scripts/tutorials/jobman_demo/__init__.py',
'scripts/tutorials/stacked_autoencoders/tests/test_dae.py',
'scripts/icml_2013_wrepl/__init__.py',
'scripts/icml_2013_wrepl/multimodal/extract_layer_2_kmeans_features.py',
'scripts/icml_2013_wrepl/multimodal/make_submission.py',
'scripts/icml_2013_wrepl/multimodal/lcn.py',
'scripts/icml_2013_wrepl/multimodal/__init__.py',
'scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py',
'scripts/icml_2013_wrepl/emotions/emotions_dataset.py',
'scripts/icml_2013_wrepl/emotions/make_submission.py',
'scripts/icml_2013_wrepl/emotions/__init__.py',
'scripts/icml_2013_wrepl/black_box/black_box_dataset.py',
'scripts/icml_2013_wrepl/black_box/make_submission.py',
'scripts/icml_2013_wrepl/black_box/__init__.py',
'scripts/diff_monitor.py',
'base.py',
'devtools/tests/test_via_pyflakes.py',
'devtools/tests/test_shebangs.py',
'devtools/tests/__init__.py',
'devtools/tests/docscrape.py',
'devtools/run_pyflakes.py',
'devtools/__init__.py',
'devtools/record.py',
'corruption.py',
'datasets/tests/test_tl_challenge.py',
'datasets/tests/test_tfd.py',
'datasets/tests/test_npy_npz.py',
'linear/tests/test_conv2d.py',
'devtools/tests/pep8/pep8.py',
'devtools/tests/pep8/__init__.py',
'scripts/lcc_tangents/make_dataset.py',
'scripts/icml_2013_wrepl/multimodal/make_wordlist.py',
'scripts/datasets/make_stl10_whitened.py',
'scripts/datasets/make_stl10_patches_8x8.py',
'scripts/datasets/make_stl10_patches.py',
'scripts/datasets/make_cifar10_whitened.py',
'scripts/datasets/make_cifar10_gcn_whitened.py',
'scripts/datasets/make_cifar100_patches.py',
'scripts/datasets/make_cifar100_gcn_whitened.py',
'scripts/datasets/make_svhn_pytables.py',
'energy_functions/tests/test_rbm_energy.py',
]
# add files which fail to run to whitelist_docstrings
whitelist_docstrings.extend([
'sandbox/rnn/models/mlp_hook.py',
'training_algorithms/tests/test_learning_rule.py',
'models/pca.py',
'datasets/tests/test_hdf5.py',
'linear/tests/test_conv2d_c01b.py',
'packaged_dependencies/theano_linear/conv2d.py',
'packaged_dependencies/theano_linear/pyramid.py',
'packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/'
'test_gpu_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/test_localdot.py',
'packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/localdot.py',
'packaged_dependencies/theano_linear/util.py',
'packaged_dependencies/theano_linear/__init__.py',
'packaged_dependencies/theano_linear/test_matrixmul.py',
'packaged_dependencies/theano_linear/test_linear.py',
'packaged_dependencies/theano_linear/spconv.py',
'sandbox/cuda_convnet/tests/test_weight_acts_strided.py',
'sandbox/cuda_convnet/tests/test_image_acts_strided.py',
'sandbox/cuda_convnet/specialized_bench.py',
'sandbox/cuda_convnet/response_norm.py',
'sandbox/cuda_convnet/convnet_compile.py',
'sandbox/cuda_convnet/pthreads.py',
'sandbox/cuda_convnet/bench.py',
'sandbox/lisa_rl/bandit/plot_reward.py',
'sandbox/lisa_rl/bandit/simulate.py',
'config/__init__.py',
'utils/__init__.py',
'optimization/test_linesearch.py',
'optimization/test_minres.py',
'optimization/test_linear_cg.py',
'scripts/papers/maxout/svhn_preprocessing.py',
'scripts/papers/maxout/compute_test_err.py',
'scripts/papers/jia_huang_wkshp_11/fit_final_model.py',
'scripts/papers/jia_huang_wkshp_11/evaluate.py',
'scripts/papers/jia_huang_wkshp_11/npy2mat.py',
'scripts/papers/jia_huang_wkshp_11/assemble.py',
'scripts/datasets/make_cifar100_patches_8x8.py',
'scripts/datasets/make_downsampled_stl10.py',
'scripts/datasets/make_cifar100_whitened.py',
'scripts/tutorials/deep_trainer/test_deep_trainer.py',
'scripts/icml_2013_wrepl/black_box/learn_zca.py',
'train_extensions/tests/test_window_flip.py',
'train_extensions/window_flip.py',
'linear/tests/test_local_c01b.py',
'sandbox/cuda_convnet/debug.py', ])
def test_format_pep8():
"""
Test if pep8 is respected.
"""
pep8_checker = StyleGuide()
files_to_check = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if rel_path in whitelist_pep8:
continue
else:
files_to_check.append(path)
report = pep8_checker.check_files(files_to_check)
if report.total_errors > 0:
raise AssertionError("PEP8 Format not respected")
def print_files_information_pep8():
"""
Print the list of files which can be removed from the whitelist and the
list of files which do not respect PEP8 formatting that aren't in the
whitelist
"""
infracting_files = []
non_infracting_files = []
pep8_checker = StyleGuide(quiet=True)
for path in list_files(".py"):
number_of_infractions = pep8_checker.input_file(path)
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if number_of_infractions > 0:
if rel_path not in whitelist_pep8:
infracting_files.append(path)
else:
if rel_path in whitelist_pep8:
non_infracting_files.append(path)
print("Files that must be corrected or added to whitelist:")
for file in infracting_files:
print(file)
print("Files that can be removed from whitelist:")
for file in non_infracting_files:
print(file)
def test_format_docstrings():
"""
Test if docstrings are well formatted.
"""
try:
verify_format_docstrings()
except SkipTest as e:
import traceback
traceback.print_exc(e)
raise AssertionError(
"Some file raised SkipTest on import, and inadvertently"
" canceled the documentation testing."
)
def verify_format_docstrings():
"""
Implementation of `test_format_docstrings`. The implementation is
factored out so it can be placed inside a guard against SkipTest.
"""
format_infractions = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if rel_path in whitelist_docstrings:
continue
try:
format_infractions.extend(docstring_errors(path))
except Exception as e:
format_infractions.append(["%s failed to run so format cannot "
"be checked. Error message:\n %s" %
(rel_path, e)])
if len(format_infractions) > 0:
msg = "\n".join(':'.join(line) for line in format_infractions)
raise AssertionError("Docstring format not respected:\n%s" % msg)
if __name__ == "__main__":
print_files_information_pep8()
| bsd-3-clause |
jfsantos/ift6266h14 | old/test_timit_iy.py | 1 | 2996 | from timit_full import TimitFullCorpusReader
import itertools
import numpy as np
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.models.mlp import *
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.termination_criteria import EpochCounter
from pylearn2.training_algorithms.sgd import SGD
from pylearn2.training_algorithms import learning_rule
from pylearn2.train import Train
from pylearn2.train_extensions import best_params
import cPickle as pickle
import theano
# Gets all utterances from <spkrid>, splits them into <framelen>
# frames with <overlap> overlaps. Returns the frames and corresponding
# phone symbols.
spkrid = 'MTCS0'
class TimitPhoneData(DenseDesignMatrix):
def __init__(self, spkrid, phone, framelen, overlap, start, stop):
data = TimitFullCorpusReader('/home/jfsantos/data/TIMIT/')
# Some list comprehension/zip magic here (but it works!)
spkrfr = [data.frames(z, 160, 159) for z in
data.utteranceids(spkrid=spkrid)]
fr, ph = zip(*[(x[0], x[1]) for x in spkrfr])
fr = np.vstack(fr)*2**-15
ph = list(itertools.chain(*ph))
# Get all elements for which the phone is 'iy'
iy_idx = [i for i,x in enumerate(ph) if x == 'iy']
fr_iy = fr[iy_idx]
X = fr_iy[:,0:159]
y = np.array([fr_iy[:,159]]).T # y.ndim has to be 2
super(TimitPhoneData,self).__init__(X=X[start:stop], y=y[start:stop])
train = TimitPhoneData(spkrid='FPLS0', phone='iy', framelen=160, overlap=159, start=0, stop=10000)
valid = TimitPhoneData(spkrid='FPLS0', phone='iy', framelen=160, overlap=159, start=10000, stop=12000)
test = TimitPhoneData(spkrid='FPLS0', phone='iy', framelen=160, overlap=159, start=12000, stop=18000)
i0 = VectorSpace(159)
s0 = Sigmoid(layer_name='h0', dim=500, sparse_init=15)
l0 = Linear(layer_name='y', dim=1, sparse_init=15)
mdl = MLP(layers=[s0, l0], nvis=159, input_space=i0)
trainer = SGD(batch_size=512, learning_rate = .01, init_momentum = .5,
monitoring_dataset = {'train' : train, 'valid': valid,
'test' : test}, termination_criterion =
EpochCounter(max_epochs=200))
watcher = best_params.MonitorBasedSaveBest(
channel_name='test_objective',
save_path='nextsample_iy_FPLS0_mlp_sig_lin_watcher.pkl')
experiment = Train(dataset=train,
model=mdl,
algorithm=trainer, extensions = [watcher])
experiment.main_loop()
# Now we have the best model, let's load it and use it to generate some
# samples!
bestmdl = pickle.load(open('nextsample_iy_FPLS0_mlp_sig_lin_watcher.pkl'))
X = theano.tensor.dmatrix('X')
y = bestmdl.fprop(X)
predict = theano.function([X], y)
# Let's start with a all zero vector, then use the prediction to populate the next sample
x0 = np.asmatrix(np.zeros((1,16000)))
for k in np.arange(160,16000):
frame = x0[:,k-160:k-1]
x0[0,k] = predict(frame)
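# Hedged addition for illustration (not in the original script): one way to
# inspect the generated signal is to rescale it back to int16 and write a wav
# file with scipy.io.wavfile. The 16 kHz rate matches the frame math above;
# the output filename is an arbitrary choice.
from scipy.io import wavfile
generated = np.asarray(x0).ravel()
wavfile.write('generated_iy_FPLS0.wav', 16000, np.int16(generated * 2**15))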
| mit |
neuroneuro15/natnetclient | build/lib/natnetclient/utils.py | 1 | 1318 | __author__ = 'ratcave'
import numpy as np
from sklearn.decomposition import PCA
def rotate_to_var(markers):
"""Returns degrees to rotate about y axis so greatest marker variance points in +X direction"""
# Mean-Center
markers -= np.mean(markers, axis=0)
# Vector in direction of greatest variance
pca = PCA(n_components=1).fit(markers[:, [0, 2]])
coeff_vec = pca.components_[0]
# # Flip coeff_vec in direction of max variance along the vector.
# marker_var = markers[markers[:,2].argsort(), 2] # Check variance along component to determine whether to flip.
# winlen = int(len(marker_var)/2+1) # Window length for moving mean (two steps, with slight overlap)
# var_means = np.array([marker_var[:winlen], marker_var[-winlen:]]).mean(axis=1)
# coeff_vec = coeff_vec * -1 if np.diff(var_means)[0] < 0 else coeff_vec
# Rotation amount, in radians
base_vec = np.array([1, 0]) # Vector in +X direction
msin, mcos = np.cross(coeff_vec, base_vec), np.dot(coeff_vec, base_vec)
angle = np.degrees(np.arctan2(msin, mcos))
print("Angle within function: {}".format(angle))
return angle
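if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: the marker
    # coordinates below are made up. A copy is passed because rotate_to_var
    # mean-centers its input in place.
    example_markers = np.array([[0., 0., 0.],
                                [1., 0., 1.],
                                [2., 0., 2.]])
    print("Example rotation: {}".format(rotate_to_var(example_markers.copy())))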
# def get_pca_rotation(markers):
#
# markers_2d = markers[:, [0, 2]]
# pca = PCA(n_components=1).fit(markers[:, [0, 2]])
#
# coeff = pca.components_[0]
| gpl-2.0 |
joshbohde/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 1 | 16225 | import numpy as np
from numpy.testing import assert_array_equal, assert_approx_equal
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import linear_model, datasets, metrics
from sklearn import preprocessing
import unittest
from nose.tools import raises
from nose.tools import assert_raises
##
## Test Data
##
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = [1, 1, 1, 2, 2, 2, 3, 3, 3]
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = [1, 2, 3]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
##
## Classification Test Case
##
class DenseSGDClassifierTestCase(unittest.TestCase):
"""Test suite for the dense representation variant of SGD"""
factory = linear_model.SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
def test_sgd_penalties(self):
"""Check whether penalties and hyperparameters are set properly"""
clf = self.factory(penalty='l2')
assert clf.rho == 1.0
clf = self.factory(penalty='l1')
assert clf.rho == 0.0
clf = self.factory(penalty='elasticnet', rho=0.85)
assert clf.rho == 0.85
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', rho=0.85)
def test_sgd_losses(self):
"""Check whether losses and hyperparameters are set properly"""
clf = self.factory(loss='hinge')
assert isinstance(clf.loss_function, linear_model.Hinge)
clf = self.factory(loss='log')
assert isinstance(clf.loss_function, linear_model.Log)
clf = self.factory(loss='modified_huber')
assert isinstance(clf.loss_function, linear_model.ModifiedHuber)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
    def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([0, 0]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([0, 0]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba for log loss only"""
# hinge loss does not allow for conditional prob estimate
clf = self.factory(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_raises(NotImplementedError, clf.predict_proba, [3, 2])
# log loss implements the logistic regression prob estimate
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X, Y)
p = clf.predict_proba([3, 2])
assert p > 0.5
p = clf.predict_proba([-1, -1])
assert p < 0.5
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
np.random.seed(13)
idx = np.arange(n)
np.random.shuffle(idx)
X = X4[idx, :]
        Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weight(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, class_weight={1: 0.001})
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000)
clf_weighted.fit(X, y, class_weight={0: 0.5, 1: 0.5})
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000)
clf.fit(X, Y, class_weight={0: 0.5})
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000)
clf.fit(X, Y, class_weight=[0.5])
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = preprocessing.scale(X)
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000).fit(X, y)
assert_approx_equal(metrics.f1_score(y, clf.predict(X)), 0.96, 2)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001,
n_iter=1000).fit(X, y, class_weight="auto")
assert_approx_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96, 2)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred) < 0.96
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000)
clf.fit(X_imbalanced, y_imbalanced, class_weight="auto")
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred) > 0.96
def test_sample_weights(self):
"""
Test weights on individual samples
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=range(7))
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = linear_model.sparse.SGDClassifier
################################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase):
"""Test suite for the dense representation variant of SGD"""
factory = linear_model.SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert clf.coef_[0] == clf.coef_[1]
def test_sgd_penalties(self):
"""Check whether penalties and hyperparameters are set properly"""
clf = self.factory(penalty='l2')
assert clf.rho == 1.0
clf = self.factory(penalty='l1')
assert clf.rho == 0.0
clf = self.factory(penalty='elasticnet', rho=0.85)
assert clf.rho == 0.85
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', rho=0.85)
def test_sgd_losses(self):
"""Check whether losses and hyperparameters are set properly"""
clf = self.factory(loss='squared_loss')
assert isinstance(clf.loss_function, linear_model.SquaredLoss)
clf = self.factory(loss='huber', p=0.5)
assert isinstance(clf.loss_function, linear_model.Huber)
assert clf.p == 0.5
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", p=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", p=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
def test_elasticnet_convergence(self):
"""Check that the SGD ouput is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
np.random.seed(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = np.random.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for rho in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, rho=rho,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, rho=rho, fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and rho=%f" % (alpha, rho))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = linear_model.sparse.SGDRegressor
| bsd-3-clause |
ephes/scikit-learn | sklearn/semi_supervised/label_propagation.py | 127 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
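# Hedged clarification (added; not in the original source): for an affinity
# matrix W with degree matrix D, graph_laplacian(..., normed=True) returns the
# normalized Laplacian I - D^{-1/2} W D^{-1/2}; negating it and zeroing the
# diagonal above therefore leaves, up to the handling of the diagonal, the
# propagation matrix S = D^{-1/2} W D^{-1/2} of Zhou et al. (2004).
# A tiny standalone check under that assumption, for a W with zero diagonal:
#   W = np.array([[0., 1.], [1., 0.]])
#   D_inv_sqrt = np.diag(1.0 / np.sqrt(W.sum(axis=1)))
#   S = D_inv_sqrt.dot(W).dot(D_inv_sqrt)   # -> [[0., 1.], [1., 0.]]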
| bsd-3-clause |
google-research/scenic | scenic/projects/robust_segvit/datasets/cityscapes_variants.py | 1 | 11019 | """Data generators for the Cityscapes dataset variants.
Supported datasets, set by dataset_configs.dataset_name in the config file:
cityscapes_corrupted: https://arxiv.org/pdf/1907.07484.pdf
fishyscapes: https://link.springer.com/article/10.1007/s11263-021-01511-6
Implementation details:
cityscapes_c: https://github.com/ekellbuch/cityscapes-c
"""
import functools
from typing import Optional
from absl import logging
from flax import jax_utils
import jax.numpy as jnp
from scenic.dataset_lib import cityscapes_dataset
from scenic.dataset_lib import dataset_utils
from scenic.dataset_lib import datasets
import tensorflow as tf
import tensorflow_datasets as tfds
CITYSCAPES_C_CORRUPTIONS = [
'gaussian_noise',
]
FISHYSCAPES_CORRUPTIONS = [
'Static',
]
CITYSCAPES_C_SEVERITIES = range(1, 6)
DATASET_INFO = {
'cityscapes': {
'tfds_name': 'cityscapes',
'split': 'validation',
'num_of_examples': 500,
},
'cityscapes_corrupted': {
'tfds_name': 'internal',
'split': 'validation',
'num_of_examples': 500,
},
    'fishyscapes': {
'tfds_name': 'internal',
'split': 'validation',
'num_of_examples': 30,
},
}
# Adds cityscapes_c
for severity in CITYSCAPES_C_SEVERITIES:
for corruption in CITYSCAPES_C_CORRUPTIONS:
temp_dataset_name = f'cityscapes_corrupted/semantic_segmentation_{corruption}_{severity}'
DATASET_INFO[temp_dataset_name] = {
'tfds_name': temp_dataset_name,
'split': 'validation',
'num_of_examples': 500,
}
# Adds fishyscapes
for corruption in FISHYSCAPES_CORRUPTIONS:
temp_dataset_name = f'fishyscapes/{corruption}'
DATASET_INFO[temp_dataset_name] = {
'tfds_name': temp_dataset_name,
'split': 'validation',
'num_of_examples': 30,
}
cityscapes_meta_data = {
'num_classes':
len([c.id for c in cityscapes_dataset.CLASSES if not c.ignore_in_eval]),
'class_names':
cityscapes_dataset.get_class_names(),
'class_colors':
cityscapes_dataset.get_class_colors(),
'class_proportions':
cityscapes_dataset.get_class_proportions(),
}
fishyscapes_meta_data = {
'num_classes': 2,
'class_names': ['ind', 'ood'],
'class_colors': [(0, 0, 1), (1, 0, 0)],
}
def normalize(image, dtype=tf.float32):
"""Normalizes the value of pixels in the given image.
Args:
image: `Tensor` representing an image binary of arbitrary size.
dtype: Tensorflow data type, Data type of the image.
Returns:
A normalized image `Tensor`.
"""
image = tf.cast(image, dtype=dtype)
if dtype not in [tf.int32, tf.int64, tf.uint32, tf.uint64]:
image /= tf.constant(255.0, shape=[1, 1, 1], dtype=dtype)
return image
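# Hedged example (added for illustration): with the default float dtype the
# function rescales uint8 pixels to [0, 1], while integer dtypes are returned
# unscaled, e.g.
#   normalize(tf.constant([[[0, 128, 255]]], dtype=tf.uint8))
#   # -> float32 tensor, values [[[0., 0.50196..., 1.]]]
#   normalize(tf.constant([[[0, 128, 255]]], dtype=tf.uint8), dtype=tf.int32)
#   # -> int32 tensor, values unchanged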
def preprocess_example_fishyscapes(example,
train,
dtype=tf.float32,
resize=None,
include_mask=True):
"""Preprocesses the given image.
Args:
example: dict; Example coming from TFDS.
train: bool; Whether to apply training-specific preprocessing or not.
dtype: Tensorflow data type; Data type of the image.
resize: sequence; [H, W] to which image and labels should be resized.
include_mask: include batch_mask to ignore specific classes.
Returns:
An example dict as required by the model.
"""
image = normalize(example['image_left'], dtype)
mask = example['mask']
# Resize test images (train images are cropped/resized during augmentation):
if not train:
if resize is not None:
image = tf.image.resize(image, resize, 'bilinear')
mask = tf.image.resize(mask, resize, 'nearest')
image = tf.cast(image, dtype)
mask = tf.cast(mask, dtype)
mask = tf.squeeze(mask, axis=2)
outputs = {'inputs': image, 'label': mask}
if include_mask:
# Fishyscapes mask has values 0,1, 255, background pixels are set as 255.
# create batch_mask array and set background pixels to 0 and
# pixels that should be included during eval to 1
batch_mask = tf.ones_like(mask, dtype)
batch_mask = tf.cast(batch_mask*(1-tf.cast(mask == 255, dtype)), dtype)
    # update the mask array by mapping cls 255 to cls 0.
mask = tf.cast(mask*(1-tf.cast(mask == 255, dtype)), dtype)
outputs = {'inputs': image, 'label': mask, 'batch_mask': batch_mask}
return outputs
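# Hedged sketch (added; not in the original module) of the 255-handling above:
# a toy mask [[0., 1.], [255., 1.]] yields label [[0., 1.], [0., 1.]] and
# batch_mask [[1., 1.], [0., 1.]], i.e. background (255) pixels are relabeled
# to class 0 and excluded from evaluation through batch_mask.
#   m = tf.constant([[0., 1.], [255., 1.]])
#   keep = 1 - tf.cast(m == 255, tf.float32)
#   batch_mask, label = tf.ones_like(m) * keep, m * keep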
preprocess_examples = {
'cityscapes': cityscapes_dataset.preprocess_example,
'fishyscapes': preprocess_example_fishyscapes,
}
def cityscapes_load_split(
dataset_name,
batch_size,
train=False,
dtype=tf.float32,
shuffle_buffer_size=10,
shuffle_seed=None,
data_augmentations=None,
preprocess_ex_eval=None,
cache=True,
data_dir: Optional[str] = None,
):
"""Creates a split from the Cityscapes dataset using TensorFlow Datasets.
For the training set, we drop the last partial batch. This is fine to do
because we additionally shuffle the data randomly each epoch, thus the trainer
will see all data in expectation. For the validation set, we pad the final
batch to the desired batch size.
Args:
dataset_name: string; Dataset name defined in DATASET_INFO.
batch_size: int; The batch size returned by the data pipeline.
train: bool; Whether to load the train or evaluation split.
dtype: TF data type; Data type of the image.
shuffle_buffer_size: int; Buffer size for the TFDS prefetch.
shuffle_seed: The seed to use when shuffling the train split.
    data_augmentations: list(str); Types of data augmentation applied on
      training data.
preprocess_ex_eval: preprocessing function. Default None.
cache: bool; Whether to cache dataset in memory.
data_dir: directory with data.
Returns:
A `tf.data.Dataset`.
"""
assert not train, 'Only evaluation is supported.'
assert dataset_name in DATASET_INFO
del data_augmentations
cityscapes_variant_info = DATASET_INFO.get(dataset_name, {})
split = cityscapes_variant_info['split'] # only supports validation
# Load the preprocessing function
if 'cityscapes' in cityscapes_variant_info.get('tfds_name'):
if dataset_name == 'cityscapes':
builder = tfds.builder(dataset_name, dtype=dtype)
elif 'cityscapes_corrupted' in dataset_name:
if data_dir is None:
# pylint: disable=line-too-long
data_dir = 'gs://ub-ekb/tensorflow_datasets/cityscapes_corrupted/tfrecords/v.0.0' # pylint: disable=line-too-long
# pylint: enable=line-too-long
builder = tfds.builder(dataset_name, data_dir=data_dir)
elif 'fishyscapes' in cityscapes_variant_info.get('tfds_name'):
if data_dir is None:
data_dir = 'gs://ub-ekb/tensorflow_datasets/fishyscapes/tfrecords/v.0.0'
builder = tfds.builder(dataset_name, data_dir=data_dir)
else:
raise NotImplementedError(f'{dataset_name} not available')
ds, ds_info = dataset_utils.load_split_from_tfds_builder(
builder=builder,
batch_size=batch_size,
split=split,
preprocess_example=preprocess_ex_eval,
shuffle_buffer_size=shuffle_buffer_size,
shuffle_seed=shuffle_seed,
cache=cache)
return ds, ds_info
def _check_dataset_exists(dataset_configs):
assert 'dataset_name' in dataset_configs, ('Must specify dataset_name in '
'dataset_configs.')
dataset_name = dataset_configs['dataset_name']
assert dataset_configs[
'dataset_name'] in DATASET_INFO, f'{dataset_name} is not supported.'
return dataset_name
@datasets.add_dataset('cityscapes_variants')
def get_dataset(*,
batch_size,
eval_batch_size,
num_shards,
dtype_str='float32',
shuffle_seed=0,
prefetch_buffer_size=2,
rng=None,
dataset_configs=None,
dataset_service_address: Optional[str] = None):
"""Returns generators for the Cityscapes validation, and test set.
Args:
batch_size: int; Determines the train batch size.
eval_batch_size: int; Determines the evaluation batch size.
num_shards: int; Number of shards --> batch shape: [num_shards, bs, ...].
dtype_str: Data type of the image (e.g. 'float32').
shuffle_seed: int; Seed for shuffling the training data.
prefetch_buffer_size: int; Buffer size for the TFDS prefetch.
rng: JAX rng key, which can be used for augmentation, shuffling, etc.
dataset_configs: dict; Dataset specific configurations.
dataset_service_address: If set, will distribute the training dataset using
the given tf.data service at the given address.
Returns:
A dataset_utils.Dataset() which includes a train_iter, a valid_iter,
a test_iter, and a dict of meta_data.
"""
del batch_size
del shuffle_seed, rng
del dataset_service_address
dtype = getattr(tf, dtype_str)
dataset_configs = dataset_configs or {}
dataset_name = _check_dataset_exists(dataset_configs)
cityscapes_variant_info = DATASET_INFO.get(dataset_name)
target_size = dataset_configs.get('target_size', None)
if 'cityscapes' in dataset_name:
preprocess_example = preprocess_examples['cityscapes']
elif 'fishyscapes' in dataset_name:
preprocess_example = preprocess_examples['fishyscapes']
preprocess_ex_eval = functools.partial(
preprocess_example, train=False, dtype=dtype, resize=target_size)
logging.info('Loading validation split of the %s dataset.', dataset_name)
eval_ds, _ = cityscapes_load_split(
dataset_name=dataset_name,
batch_size=eval_batch_size,
train=False,
dtype=dtype,
preprocess_ex_eval=preprocess_ex_eval)
maybe_pad_batches_eval = functools.partial(
dataset_utils.maybe_pad_batch,
train=False,
batch_size=eval_batch_size,
pixel_level=True)
shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)
exclude_classes = functools.partial(
cityscapes_dataset.exclude_bad_classes,
new_labels=cityscapes_dataset.get_post_exclusion_labels())
eval_iter = iter(eval_ds)
eval_iter = map(dataset_utils.tf_to_numpy, eval_iter)
eval_iter = map(maybe_pad_batches_eval, eval_iter)
if 'cityscapes' in dataset_name:
eval_iter = map(exclude_classes, eval_iter)
eval_iter = map(shard_batches, eval_iter)
eval_iter = jax_utils.prefetch_to_device(eval_iter, prefetch_buffer_size)
if target_size is None:
input_shape = (-1, 1024, 2048, 3)
else:
input_shape = (-1,) + tuple(target_size) + (3,)
meta_data = {
'input_shape': input_shape,
'num_train_examples': 0,
'num_eval_examples': cityscapes_variant_info['num_of_examples'],
'input_dtype': getattr(jnp, dtype_str),
'target_is_onehot': False,
}
if 'cityscapes' in dataset_name:
meta_data.update(cityscapes_meta_data)
elif 'fishyscapes' in dataset_name:
meta_data.update(fishyscapes_meta_data)
return dataset_utils.Dataset(None, eval_iter, None, meta_data)
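# Hedged usage sketch (added for illustration): the batch sizes, shard count,
# target size, and the valid_iter field name below are assumptions, while
# 'fishyscapes/Static' is one of the names registered in DATASET_INFO above.
#   dataset = get_dataset(
#       batch_size=8, eval_batch_size=8, num_shards=1,
#       dataset_configs={'dataset_name': 'fishyscapes/Static',
#                        'target_size': (512, 1024)})
#   eval_batch = next(dataset.valid_iter)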
| apache-2.0 |
starimpact/fast-rcnn | tools/train_net.py | 23 | 3134 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
roidb = get_training_roidb(imdb)
output_dir = get_output_dir(imdb, None)
print 'Output will be saved to `{:s}`'.format(output_dir)
train_net(args.solver, roidb, output_dir,
pretrained_model=args.pretrained_model,
max_iters=args.max_iters)
| mit |
YzPaul3/h2o-3 | py2/testdir_single_jvm/test_GBM_basic.py | 20 | 4883 | import unittest, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i
from h2o_test import dump_json, verboseprint, OutputObj
from tabulate import tabulate
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=12)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GBM_basic(self):
bucket = 'home-0xdiag-datasets'
importFolderPath = 'standard'
trainFilename = 'covtype.shuffled.90pct.data'
train_key = 'covtype.train.hex'
model_key = 'GBMModelKey'
timeoutSecs = 1800
csvPathname = importFolderPath + "/" + trainFilename
# FIX! do I need to force enum for classification? what if I do regression after this?
columnTypeDict = {54: 'Enum'}
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, columnTypeDict=columnTypeDict,
schema='local', chunk_size=4194304, hex_key=train_key, timeoutSecs=timeoutSecs)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
labelListUsed = list(labelList)
numColsUsed = numCols
# run through a couple of parameter sets
parameters = []
parameters.append({
'response_column': 'C55',
'ntrees': 2,
'max_depth': 10,
'min_rows': 3,
'nbins': 40,
'learn_rate': 0.2,
# 'loss': 'multinomial',
# FIX! doesn't like it?
# 'loss': 'Bernoulli',
# FIX..no variable importance for GBM yet?
# 'variable_importance': False,
# 'seed':
})
parameters.append({
'response_column': 'C55',
'loss': 'multinomial',
# This does nothing! intent is solely based on type of response col
'ntrees': 1,
'max_depth': 20,
'min_rows': 3,
'nbins': 40,
'learn_rate': 0.2,
})
model_key = 'covtype_gbm.hex'
for p in parameters:
bmResult = h2o.n0.build_model(
algo='gbm',
model_id=model_key,
training_frame=train_key,
validation_frame=train_key,
parameters=p,
timeoutSecs=60)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
# cmm = OutputObj(cmmResult, 'cmm')
# print "\nLook!, can use dot notation: cmm.cm.confusion_matrix", cmm.cm.confusion_matrix, "\n"
vis = OutputObj(model.variable_importances, 'vis')
# just the first 10
visDataChopped = [v[0:9] for v in vis.data]
names = visDataChopped[0]
relativeImportance = visDataChopped[1]
print "names:", names
print "relativeImportance:", relativeImportance
scaledImportance = visDataChopped[2]
percentage = visDataChopped[3]
print "\nvis\n", tabulate(visDataChopped[1:], headers=names)
# print "\nrelativeImportance (10)\n", tabulate(relativeImportance, headers=names)
# print "\nscaledImportance (10)\n", tabulate(scaledImportance, headers=names)
# print "\npercentage (10)\n", tabulate(percentage, headers=names)
print "will say Regression or Classification. no Multinomial?"
print "model.model_category", model.model_category
assert model.model_category=='Multinomial', model.model_category
# FIX!
# print "FIX! why is mse 0 and mse_train Nan?"
# print "model.mse:", model.mse
# print "model.mse_train:", model.mse_train
if 1==0:
print ""
for i,c in enumerate(cmm.cm):
print "\ncmms.cm[%s]" % i, tabulate(c)
print ""
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mmResultShort = mmResult['model_metrics'][0]
del mmResultShort['frame'] # too much!
mm = OutputObj(mmResultShort, 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
# too slow!
# h2o_cmd.runStoreView()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
ephes/scikit-learn | sklearn/ensemble/partial_dependence.py | 249 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
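# Hedged illustration (added; not in the original source): for
#   X = np.array([[0., 0.], [1., 10.], [2., 20.]])
# each column has only three unique values, below the default grid_resolution
# of 100, so the axes are the unique values themselves:
#   grid, axes = _grid_from_X(X)
#   # axes == [array([0., 1., 2.]), array([0., 10., 20.])]
#   # grid.shape == (9, 2)   (cartesian product of the two axes)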
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
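# ---------------------------------------------------------------------------
# Minimal usage sketch for the plotting helper above (illustrative only):
# the synthetic dataset, estimator settings and feature indices chosen here
# are arbitrary demonstration values, not recommendations.
if __name__ == "__main__":
    from sklearn.datasets import make_friedman1
    from sklearn.ensemble import GradientBoostingRegressor

    X_demo, y_demo = make_friedman1(n_samples=200, random_state=0)
    est = GradientBoostingRegressor(n_estimators=50, random_state=0)
    est.fit(X_demo, y_demo)
    # one-way PDPs for features 0 and 1, plus a two-way PDP for the pair
    fig_demo, axs_demo = plot_partial_dependence(
        est, X_demo, [0, 1, (0, 1)], grid_resolution=50)
    fig_demo.suptitle("Partial dependence (illustrative)")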
| bsd-3-clause |
gibiansky/tensorflow | tensorflow/contrib/learn/python/learn/datasets/load_csv_test.py | 31 | 1334 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import datasets
class LoadCsvTest(tf.test.TestCase):
"""Test load csv functions."""
def testIris(self):
iris = datasets.load_iris()
self.assertTupleEqual(iris.data.shape, (150, 4))
self.assertTupleEqual(iris.target.shape, (150,))
def testBoston(self):
boston = datasets.load_boston()
self.assertTupleEqual(boston.data.shape, (506, 13))
self.assertTupleEqual(boston.target.shape, (506,))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
RafaelCosman/pybrain | pybrain/rl/learners/valuebased/nfq.py | 31 | 1994 | from scipy import r_
from pybrain.rl.learners.valuebased.valuebased import ValueBasedLearner
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers.rprop import RPropMinusTrainer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import one_to_n
class NFQ(ValueBasedLearner):
""" Neuro-fitted Q-learning"""
def __init__(self, maxEpochs=20):
ValueBasedLearner.__init__(self)
self.gamma = 0.9
self.maxEpochs = maxEpochs
def learn(self):
# convert reinforcement dataset to NFQ supervised dataset
supervised = SupervisedDataSet(self.module.network.indim, 1)
for seq in self.dataset:
lastexperience = None
for state, action, reward in seq:
if not lastexperience:
# delay each experience in sequence by one
lastexperience = (state, action, reward)
continue
# use experience from last timestep to do Q update
(state_, action_, reward_) = lastexperience
Q = self.module.getValue(state_, action_[0])
inp = r_[state_, one_to_n(action_[0], self.module.numActions)]
tgt = Q + 0.5*(reward_ + self.gamma * max(self.module.getActionValues(state)) - Q)
supervised.addSample(inp, tgt)
# update last experience with current one
lastexperience = (state, action, reward)
# train module with backprop/rprop on dataset
trainer = RPropMinusTrainer(self.module.network, dataset=supervised, batchlearning=True, verbose=False)
trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)
# alternative: backprop, was not as stable as rprop
# trainer = BackpropTrainer(self.module.network, dataset=supervised, learningrate=0.005, batchlearning=True, verbose=True)
# trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)
| bsd-3-clause |
Cito/sqlalchemy | lib/sqlalchemy/orm/query.py | 2 | 131160 | # orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_text(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition("get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.part,
parts_alias.sub_part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable.
"""
self._enable_eagerloads = value
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
WARNING: use this method with caution; if the same instance is present
in more than one batch of rows, end-user changes to attributes will be
overwritten.
In particular, it's usually impossible to use this setting with
eagerly loaded collections (i.e. any lazy='joined' or 'subquery')
since those collections will be cleared for a new load when
encountered in a subsequent result batch. In the case of 'subquery'
loading, the full result for all rows is fetched which generally
defeats the purpose of :meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while :meth:`~sqlalchemy.orm.query.Query.yield_per`
will set the ``stream_results`` execution option to True; currently
this is only understood by the
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect, which will
stream results using server-side cursors instead of pre-buffering all
rows for this query. Other DBAPIs pre-buffer all rows before making
them available.
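A minimal sketch (assuming a mapped ``User`` class and an existing
:class:`.Session` named ``session``)::
    for user in session.query(User).yield_per(100):
        print(user.name)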
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
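A minimal sketch (assuming ``User.addresses`` is a configured
relationship to ``Address``)::
    someuser = session.query(User).get(5)
    q = session.query(Address).with_parent(someuser, 'addresses')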
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
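A minimal sketch (assuming a mapped ``User`` class); criteria added
after :meth:`.from_self` apply to the row-limited subquery rather than
to the original SELECT::
    q = session.query(User).order_by(User.id).limit(5)
    q = q.from_self().filter(User.name.like('e%'))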
"""
fromclause = self.with_labels().enable_eagerloads(False).\
_enable_single_crit(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
if entities:
q._set_entities(entities)
return q
@_generative()
def _enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
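A minimal sketch (assuming a ``User.addresses`` relationship)::
    from sqlalchemy.orm import joinedload
    q = session.query(User).options(joinedload(User.addresses))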
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
etc.
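A minimal sketch (assuming a MySQL backend; ``ix_user_name`` is a
hypothetical index name)::
    q = session.query(User).with_hint(
        User, 'USE INDEX (ix_user_name)', 'mysql')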
"""
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
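A minimal sketch, using the ``stream_results`` option noted above::
    q = session.query(User).execution_options(stream_results=True)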
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
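A minimal sketch (assuming a mapped ``User`` class with a ``name``
column; the string criterion is coerced via :func:`.text`)::
    q = session.query(User).filter("name = :uname")
    q = q.params(uname='ed')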
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with :meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, e.g.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
if isinstance(criterion, util.string_types):
criterion = sql.text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most common
usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='ed@foo.com').\\
filter(a_alias.email_address=='ed@bar.com')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == 'ed@foo.com').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form of the form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how :meth:`~.Query.join`
is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs.keys()))
return self._join(props,
outerjoin=False, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
right_entity = onclause.property.mapper
of_type = getattr(onclause, '_of_type', None)
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(l_info.selectable, from_obj) and \
sql_util.selectables_overlap(from_obj, r_info.selectable):
overlap = True
break
elif sql_util.selectables_overlap(l_info.selectable, r_info.selectable):
overlap = True
if overlap and l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
        # which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
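        As an additional sketch, assuming a ``Node`` mapping with a
        self-referential ``children`` relationship::
            q = session.query(Node).\\
                    join(Node.children, aliased=True).\\
                    filter(Node.name == 'child').\\
                    reset_joinpoint().\\
                    filter(Node.name == 'parent')
        After :meth:`.reset_joinpoint`, the final ``filter()`` is applied
        against the base ``Node`` entity rather than the alias.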
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
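    # Usage sketch for ``slice()`` (``User`` is an assumed mapped class):
    #     session.query(User).order_by(User.id).slice(5, 10)
    # is equivalent to ``query[5:10]`` and to ``query.offset(5).limit(5)``,
    # rendering LIMIT 5 OFFSET 5 on most backends.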
@_generative(_no_statement_condition)
    def slice(self, start, stop):
        """Apply LIMIT/OFFSET to the ``Query`` based on a
        range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
        :param \*criterion: optional column expressions.  When present,
         the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
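        E.g., a sketch assuming a ``User`` mapping on the Postgresql
        backend::
            # plain DISTINCT
            session.query(User.name).distinct()
            # DISTINCT ON (users.name)
            session.query(User).distinct(User.name)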
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
         not using any commas.  This is particularly useful for
         MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement argument is either a string, a ``select()`` construct,
or a ``text()`` construct, and should return the set of columns
appropriate to the entity class represented by this ``Query``.
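        E.g., a sketch using a :func:`.text` construct, assuming a mapped
        ``User`` class backed by a ``users`` table::
            q = session.query(User).\\
                    from_statement(
                        text("SELECT * FROM users WHERE name=:name")).\\
                    params(name='ed')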
"""
if isinstance(statement, util.string_types):
statement = sql.text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
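        A typical calling pattern, sketched with an assumed ``User``
        mapping::
            from sqlalchemy.orm.exc import NoResultFound
            try:
                user = session.query(User).filter(User.id == 5).one()
            except NoResultFound:
                user = None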
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
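        A brief sketch, where ``cached_rows`` stands in for an iterator
        previously produced by an equivalent query::
            q = session.query(User).filter(User.name.like('e%'))
            merged_users = list(q.merge_result(cached_rows, load=False))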
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
.. versionadded:: 0.8.1
"""
return sql.exists(self.with_labels().statement.with_only_columns(['1']))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
          expire_all(). Before the expiration, objects that were in fact
          deleted may still remain in the session, which can lead to
          confusing results if they are accessed via get() or via
          already-loaded collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships - it
is assumed that ON DELETE CASCADE/SET NULL/etc. is configured for any foreign key
references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an :class:`~sqlalchemy.orm.exc.ObjectDeletedError`
is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
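        E.g., a sketch of a bulk delete against an assumed ``User``
        mapping::
            session.query(User).\\
                    filter(User.name == 'ed').\\
                    delete(synchronize_session=False)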
"""
#TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
:param values: a dictionary with attributes names as keys and literal
values or sql expressions as values.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships - it
is assumed that ON UPDATE CASCADE is configured for any foreign key
references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* As of 0.8, this method will support multiple table updates, as detailed
in :ref:`multi_table_updates`, and this behavior does extend to support
updates of joined-inheritance and other multiple table mappings. However,
the **join condition of an inheritance mapper is currently not
automatically rendered**.
Care must be taken in any multiple-table update to explicitly include
the joining condition between those tables, even in mappings where
this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of the
``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
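        E.g., a sketch of the basic calling form, again against an
        assumed ``User`` mapping::
            session.query(User).\\
                    filter(User.name == 'ed').\\
                    update({"name": "edward"}, synchronize_session='fetch')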
"""
#TODO: value keys need to be mapped to corresponding sql cols and
# instr.attr.s to string keys
#TODO: updates of manytoone relationships need to be converted to
# fk assignments
#TODO: cascades need handling.
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in self._mapper_adapter_map.values():
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
self.custom_rows = bool(self.mapper.dispatch.append_result)
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
        Note the newer style of using a free-standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(self.mapper,
sql_util.ColumnAdapter(from_obj,
self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, custom_rows):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
#if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
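        As a sketch, a subclass could return each bundle row as a plain
        dictionary instead of a keyed tuple::
            class DictBundle(Bundle):
                def create_row_processor(self, query, procs, labels):
                    def proc(row, result):
                        return dict(
                            zip(labels, (p(row, None) for p in procs))
                        )
                    return proc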
"""
def proc(row, result):
return util.KeyedTuple([proc(row, None) for proc in procs], labels)
return proc
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
custom_rows = False
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
#c._label_name = self._label_name
#c.entity_zero = self.entity_zero
#c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, custom_rows):
procs, labels = zip(
*[ent.row_processor(query, context, custom_rows)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
if c is not column:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
custom_rows = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, custom_rows):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
def proc(row, result):
return row[column]
return proc, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
| mit |
irit-melodi/attelo | attelo/metrics/classification_structured.py | 3 | 5266 | """Classification metrics for structured outputs.
"""
from collections import Counter
from itertools import chain, izip
import numpy as np
def _unique_labels(y):
"""Set of unique labels in y"""
return set(y_ij[1] for y_ij in
chain.from_iterable(y_i for y_i in y))
def unique_labels(*ys):
"""Extract an ordered array of unique labels.
Parameters
----------
    *ys : one or more iterables of structures
        Collections of structured outputs; the label of each item is
        taken from its second element.
See also
--------
This is the structured version of
`sklearn.utils.multiclass.unique_labels`
"""
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# TODO check the set of labels contains a unique (e.g. string) type
# of values
return np.array(sorted(ys_labels))
def precision_recall_fscore_support(y_true, y_pred, labels=None,
average=None):
"""Compute precision, recall, F-measure and support for each class.
The support is the number of occurrences of each class in
``y_true``.
This is essentially a structured version of
    sklearn.metrics.classification.precision_recall_fscore_support.
It should apply equally well to lists of constituency tree spans
and lists of dependency edges.
Parameters
----------
y_true: list of iterable
Ground truth target structures, encoded in a sparse format (e.g.
list of edges or span descriptions).
y_pred: list of iterable
Estimated target structures, encoded in a sparse format (e.g. list
of edges or span descriptions).
labels: list, optional
The set of labels to include, and their order if ``average is
None``.
    average: string, [None (default), 'micro', 'macro']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by counting the total true
positives, false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
Returns
-------
precision: float (if average is not None) or array of float, shape=\
[n_unique_labels]
recall: float (if average is not None) or array of float, shape=\
[n_unique_labels]
fscore: float (if average is not None) or array of float, shape=\
[n_unique_labels]
support: int (if average is not None) or array of int, shape=\
[n_unique_labels]
        The number of occurrences of each label in ``y_true``.
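    Examples
    --------
    A minimal sketch where each structure is a list of ``(edge, label)``
    pairs (the data below is made up)::
        y_true = [[((0, 1), 'elab'), ((1, 2), 'narr')]]
        y_pred = [[((0, 1), 'elab'), ((1, 2), 'cont')]]
        # micro-averaged precision/recall/f-score are each 0.5 here,
        # with a support of 2
        p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                     average='micro')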
"""
average_options = frozenset([None, 'micro', 'macro'])
if average not in average_options:
raise ValueError('average has to be one of' +
str(average_options))
# TMP
if average == 'macro':
raise NotImplementedError('average currently has to be micro or None')
# end TMP
# gather an ordered list of unique labels from y_true and y_pred
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
# n_labels = None
else:
# EXPERIMENTAL
labels = [lbl for lbl in labels if lbl in present_labels]
# n_labels = len(labels)
# FIXME complete/fix this
# raise ValueError('Parameter `labels` is currently unsupported')
# end EXPERIMENTAL
# compute tp_sum, pred_sum, true_sum
# true positives for each tree
tp = [set(yi_true) & set(yi_pred)
for yi_true, yi_pred in izip(y_true, y_pred)]
# TODO find a nicer and faster design that resembles sklearn's, e.g.
# use np.bincount instead of collections.Counter
tp_sum = Counter(y_ij[1] for y_ij in chain.from_iterable(tp))
true_sum = Counter(y_ij[1] for y_ij in chain.from_iterable(y_true))
pred_sum = Counter(y_ij[1] for y_ij in chain.from_iterable(y_pred))
# transform to np arrays of floats
tp_sum = np.array([float(tp_sum[lbl]) for lbl in labels])
true_sum = np.array([float(true_sum[lbl]) for lbl in labels])
pred_sum = np.array([float(pred_sum[lbl]) for lbl in labels])
# TODO rewrite to compute by summing over scores broken down by label
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
true_sum = np.array([true_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
# finally compute the desired statistics
# when the div denominator is 0, assign 0.0 (instead of np.inf)
precision = tp_sum / pred_sum
precision[pred_sum == 0] = 0.0
recall = tp_sum / true_sum
recall[true_sum == 0] = 0.0
f_score = 2 * (precision * recall) / (precision + recall)
f_score[precision + recall == 0] = 0.0
if average is not None:
precision = np.average(precision)
recall = np.average(recall)
f_score = np.average(f_score)
true_sum = np.average(true_sum) # != sklearn: we keep the support
return precision, recall, f_score, true_sum
| gpl-3.0 |
roxyboy/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
roxyboy/scikit-learn | setup.py | 142 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages are based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
        # They are required to succeed without Numpy, for example when
        # pip is used to install Scikit-learn while Numpy is not yet present in
        # the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
cpausmit/IntelROCCS | Monitor/dynamoToPickle.py | 2 | 2945 | #!/usr/bin/env python
import os, sys
import re, glob, time, json, pprint
import cPickle as pickle
import multiprocessing as mp
from Dataset import Dataset,Request
import dynamoDB
import requestParse
import config
from Stopwatch import Stopwatch  # assumed local helper providing the timer used below
#===================================================================================================
# M A I N
#===================================================================================================
if __name__=="__main__":
sw = Stopwatch()
print 'Loading interesting datasets'
# load all interesting datasets
cursor = dynamoDB.getDbCursor()
table_dump = dynamoDB.getDatasetsAndProps(cursor)
datasets = {}
for name,nfiles,size,dt in table_dump:
if not re.match(config.dataset_pattern,name):
continue
ds = Dataset(name)
ds.nFiles = int(nfiles)
ds.sizeGB = size/(10.**9)
ds.cTime = time.mktime(dt.timetuple())
datasets[name] = ds
sw.report()
print 'Considering %i relevant datasets'%(len(datasets))
print 'Importing transfer history'
# import phedex history
pool = mp.Pool(processes=10)
all_transfers = pool.map(requestParse.parseTransfer,
glob.glob(config.requests_dir+'/requests_transfer_*.json'))
for reqs in all_transfers:
for d in reqs:
if not d in datasets:
continue
for t,s in reqs[d]:
if not re.match(config.site_pattern,s):
continue
datasets[d].addTransfer(s,t)
sw.report()
print 'Importing deletion history'
all_deletions = pool.map(requestParse.parseDeletion,
glob.glob(config.requests_dir+'/requests_delete_*.json'))
for reqs in all_deletions:
for d in reqs:
if not d in datasets:
continue
for t,s in reqs[d]:
if not re.match(config.site_pattern,s):
continue
datasets[d].addDeletion(s,t)
sw.report()
print 'Sorting history'
# organize the history
for name,d in datasets.iteritems():
d.sortRequests()
sw.report()
print 'Importing CRAB+xrootd accesses'
# import access history
all_accesses = dynamoDB.getDatasetsAccesses(cursor)
for name,node,dt,n in all_accesses:
if name not in datasets:
continue
if not re.match(config.site_pattern,node):
continue
timestamp = time.mktime(dt.timetuple())
datasets[name].addAccesses(node,n,timestamp)
sw.report()
print 'Exporting to pickle'
i=0
for k in datasets:
if i == 10: break
print k
print datasets[k]
i+= 1
pickleJar = open('monitorCache_'+ config.name+'.pkl',"wb")
pickle.dump(datasets,pickleJar,2) # put the pickle in a jar
pickleJar.close() # close the jar
sw.report()
sys.exit(0)
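# Minimal sketch of reading the cache back from a separate script, assuming
# the same config.name that produced the pickle above:
# import cPickle as pickle
# with open('monitorCache_' + config.name + '.pkl', 'rb') as pickleJar:
#     datasets = pickle.load(pickleJar)
# print len(datasets), 'datasets loaded'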
| mit |
cpausmit/IntelROCCS | Detox/python/phedexApi.py | 3 | 17588 | #!/usr/bin/python
#---------------------------------------------------------------------------------------------------
#
# This script provides an API for PhEDEx communications; one can delete or subscribe datasets
# using methods defined in this class.
#
# This script uses an auxiliary tool for logging purposes in case a request runs into an error.
#
#---------------------------------------------------------------------------------------------------
__author__ = '*Bjorn Barrefors, $Maxim Goncharov, $Christoph Paus'
__organization__ = '*Holland Computing Center - University of Nebraska-Lincoln, $MIT'
__email__ = 'bbarrefo@cse.unl.edu, maxi@mit.edu, paus@mit.edu'
import sys
import os
import re
import urllib
import urllib2
import httplib
import time
import datetime
try:
import json
except ImportError:
import simplejson as json
from cmsDataLogger import cmsDataLogger
####################################################################################################
#
# P h E D E x A P I
#
####################################################################################################
class phedexApi:
"""
_phedexApi_
    Interface to submit queries to the PhEDEx API. For specifications of calls see
https://cmsweb.cern.ch/phedex/datasvc/doc
Class variables:
phedexBase -- Base URL to the PhEDEx web API
logger -- Used to print log and error messages to log file
"""
# Useful variables
# phedexBase = "https://cmsweb.cern.ch/phedex/datasvc/"
    # phedexInstance = "prod" or "dev"
# dataType = "json" or "xml"
# site = "T2_US_Nebraska"
# dataset = "/BTau/GowdyTest10-Run2010Av3/RAW"
# group = 'local' or 'AnalysisOps'
def __init__(self, logPath=''):
"""
__init__
Set up class constants
"""
statusDirectory = os.environ['DETOX_DB'] + '/' + os.environ['DETOX_STATUS']
self.logger = cmsDataLogger(statusDirectory+'/')
self.phedexBase = "https://cmsweb.cern.ch/phedex/datasvc/"
############################################################################
# #
# P h E D E x C A L L #
# #
############################################################################
def phedexCall(self, url, values):
"""
_phedexCall_
        Make an HTTP POST call to the PhEDEx API.
        The function only guarantees that something is returned;
        the caller needs to check the response for correctness.
Keyword arguments:
url -- URL to make API call
values -- Arguments to pass to the call
Return values:
1 -- Status, 0 = everything went well, 1 = something went wrong
2 -- IF status == 0 : HTTP response ELSE : Error message
"""
name = "phedexCall"
data = urllib.urlencode(values)
opener = urllib2.build_opener(HTTPSGridAuthHandler())
request = urllib2.Request(url, data)
try:
response = opener.open(request)
except urllib2.HTTPError, e:
self.logger.error(name, e.read())
self.logger.error(name, "URL: %s" % (str(url),))
self.logger.error(name, "VALUES: %s" % (str(values),))
#print (name, "VALUES: %s" % (str(values),))
return 1, " ERROR - urllib2.HTTPError"
except urllib2.URLError, e:
self.logger.error(name, e.args)
self.logger.error(name, "URL: %s" % (str(url),))
self.logger.error(name, "VALUES: %s" % (str(values),))
#print (name, "VALUES: %s" % (str(values)))
return 1, " ERROR - urllib2.URLError"
return 0, response
############################################################################
# #
# D A T A #
# #
############################################################################
def data(self, dataset='', block='', fileName='', level='block',
createSince='', format='json', instance='prod'):
"""
_data_
PhEDEx data call
        At least one of the arguments dataset, block, fileName has to be passed.
        No checking is made for XML data.
        Even if JSON data is returned, no guarantees are made about its
        structure.
Keyword arguments:
dataset -- Name of dataset to look up
        block -- Name of block to look up; only data for this block is returned
        fileName -- Name of file to look up; only data for this file is returned
level -- Which granularity of dataset information to show
createSince -- Files/blocks/datasets created since this date/time
format -- Which format to return data as, XML or JSON
instance -- Which instance of PhEDEx to query, dev or prod
Return values:
check -- 0 if all went well, 1 if error occured
data -- json structure if json format, xml structure if xml format
"""
name = "data"
if not (dataset or block or fileName):
self.logger.error(name, "Need to pass at least one of dataset/block/fileName")
return 1, "Error"
values = { 'dataset' : dataset, 'block' : block, 'file' : fileName,
'level' : level, 'create_since' : createSince }
dataURL = urllib.basejoin(self.phedexBase, "%s/%s/data" % (format, instance))
check, response = self.phedexCall(dataURL, values)
if check:
# An error occurred
self.logger.error(name, "Data call failed")
return 1, "Error"
if format == "json":
try:
data = json.load(response)
except ValueError, e:
# This usually means that PhEDEx didn't like the URL
self.logger.error(name, "In call to url %s : %s" % (dataURL, str(e)))
return 1, " ERROR - ValueError"
if not data:
self.logger.error(name, "No json data available")
return 1, " ERROR - no data"
else:
data = response.read()
return 0, data
############################################################################
# #
# P A R S E #
# #
############################################################################
def parse(self, data, xml):
"""
_parse_
Take data output from PhEDEx and parse it into xml syntax
corresponding to subscribe and delete calls.
"""
for k, v in data.iteritems():
k = k.replace("_", "-")
if type(v) is list:
xml = "%s>" % (xml,)
for v1 in v:
xml = "%s<%s" % (xml, k)
xml = self.parse(v1, xml)
if (k == "file"):
xml = "%s/>" % (xml,)
else:
xml = "%s</%s>" % (xml, k)
else:
if k == "lfn":
k = "name"
elif k == "size":
k = "bytes"
if (k == "name" or k == "is-open" or k == "is-transient" or \
k == "bytes" or k== "checksum"):
xml = '%s %s="%s"' % (xml, k, v)
return xml
############################################################################
# #
# X M L D A T A #
# #
############################################################################
def xmlData(self, datasets=[], instance='prod', level='file'):
"""
_xmlData_
        Get json data from PhEDEx for all datasets and convert it to an xml
        structure compliant with the PhEDEx delete/subscribe call.
Keyword arguments:
datasets -- List of dataset names
instance -- The instance on which the datasets resides, prod/dev
Return values:
error -- 1 if an error occurred, 0 if everything went as expected
xml -- The converted data now represented as an xml structure
"""
name = "xmlData"
# @CHANGED: Function now takes a list of datasets instead of only one
if not datasets:
self.logger.error(name, "Need to pass at least one of dataset")
return 1, "Error"
xml = '<data version="2">'
xml = '%s<%s name="https://cmsweb.cern.ch/dbs/%s/global/DBSReader">'\
% (xml, 'dbs', instance)
for dataset in datasets:
if '#' in dataset:
parts = dataset.split('#')
check, response = self.data(block=dataset, level=level, instance=instance)
else:
check, response = self.data(dataset=dataset, level=level, instance=instance)
if check:
return 1, "Error"
data = response.get('phedex').get('dbs')
if not data:
return 1, "Error"
xml = "%s<%s" % (xml, 'dataset')
data = data[0].get('dataset')
xml = self.parse(data[0], xml)
xml = "%s</%s>" % (xml, 'dataset')
xml = "%s</%s>" % (xml, 'dbs')
xml_data = "%s</data>" % (xml,)
#print xml_data
return 0, xml_data
############################################################################
# #
# S U B S C R I B E #
# #
############################################################################
def subscribe(self, node='', data='', level='dataset', priority='low',
move='n', static='n', custodial='n', group='local',
timeStart='', requestOnly='n', noMail='n', comments='',
format='json', instance='prod'):
"""
_subscribe_
Set up subscription call to PhEDEx API.
"""
name = "subscribe"
if not (node and data):
self.logger.error(name, "Need to pass both node and data")
return 1, "Error"
values = { 'node' : node, 'data' : data, 'level' : level, 'priority' : priority,
'move' : move, 'static' : static, 'custodial' : custodial, 'group' : group,
'time_start' : timeStart, 'request_only' : requestOnly, 'no_mail' : noMail,
'comments' : comments }
subscriptionURL = urllib.basejoin(self.phedexBase, "%s/%s/subscribe" % (format, instance))
check, response = self.phedexCall(subscriptionURL, values)
if check:
# An error occurred
self.logger.error(name, "Subscription call failed")
return 1, "Error"
return 0, response
############################################################################
# #
# D E L E T E #
# #
############################################################################
def delete(self, node='', data='', level='dataset', rmSubscriptions='y',
comments='', format='json', instance='prod'):
name = "delete"
if not (node and data):
self.logger.error(name, "Need to pass both node and data")
return 1, "Error"
values = { 'node' : node, 'data' : data, 'level' : level,
'rm_subscriptions' : rmSubscriptions, 'comments' : comments }
deleteURL = urllib.basejoin(self.phedexBase, "%s/%s/delete" % (format, instance))
check, response = self.phedexCall(deleteURL, values)
if check:
self.logger.error(name, "Delete call failed")
return 1, "ERROR - self.phedexCall with response: " + response
return 0, response
############################################################################
# #
# U P D A T E R E Q U E S T #
# #
############################################################################
def updateRequest(self, decision, request, node, comments='',format='json', instance='prod'):
name = "update"
values = {'decision':decision, 'request':request, 'node':node, 'comments':comments}
url = urllib.basejoin(self.phedexBase, "%s/%s/updaterequest" % (format, instance))
check, response = self.phedexCall(url, values)
if check:
self.logger.error(name, "Update call failed")
return 1, "ERROR - self.phedexCall with response: " + response
return 0, response
def changeGroup(self, node, dataset, group, comments='',format='json',
instance='prod', level='dataset'):
name = "changegroup"
values = {'node':node, 'dataset':dataset, 'group':group}
url = urllib.basejoin(self.phedexBase, "%s/%s/updatesubscription" % (format, instance))
check, response = self.phedexCall(url, values)
if check:
self.logger.error(name, "Change group call failed")
print response
print check
return 1, "ERROR - self.phedexCall with response: " + response
return 0, response
def getDelRequests(self,node, request_since=1428200998, complete='y',format='json',
instance = 'prod'):
name = 'deletions'
values = {'node':node, 'request_since':request_since, 'complete':complete}
url = urllib.basejoin(self.phedexBase, "%s/%s/deletions" % (format, instance))
check, response = self.phedexCall(url, values)
if check:
self.logger.error(name, "Get deletions for site")
print response
print check
return 1, "ERROR - self.phedexCall with response: " + response
return 0, response
####################################################################################################
#
# H T T P S G R I D A U T H H A N D L E R
#
####################################################################################################
class HTTPSGridAuthHandler(urllib2.HTTPSHandler):
"""
_HTTPSGridAuthHandler_
    Get proxy to access the PhEDEx API
Needed for subscribe and delete calls
Class variables:
key -- User key to CERN with access to PhEDEx
cert -- User certificate connected to key
"""
def __init__(self):
urllib2.HTTPSHandler.__init__(self)
self.key = self.getProxy()
self.cert = self.key
def https_open(self, req):
return self.do_open(self.getConnection, req)
def getProxy(self):
proxy = os.environ['DETOX_X509UP']
return proxy
def getConnection(self, host, timeout=300):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
####################################################################################################
#
# M A I N
#
####################################################################################################
if __name__ == '__main__':
"""
__main__
For testing purpose only
"""
# Example for deletion
#phedexApi = phedexApi(logPath='./')
#check, data = phedexApi.xmlData(datasets=['/MET/Run2012A-22Jan2013-v1/AOD'], instance='prod')
#if check:
# sys.exit(1)
#print data
#
#check, response = \
# phedexApi.delete(node='T2_US_MIT', data=data, instance='prod',
# comments='Just a test by Christoph Paus for Maxim Goncharov.')
#if check:
# print "This is the response from phedexApi.delete: " + response
# sys.exit(1)
#
#print response.read()
# Example for subscription
#phedexApi = phedexApi(logPath='./')
#check, data = phedexApi.xmlData(
# datasets=['/Muplus_Pt1_PositiveEta-gun/Muon2023Upg14-DES23_62_V1-v1/GEN-SIM'], instance='prod')
#if check:
# sys.exit(1)
#print data
#
#check, response = \
# phedexApi.subscribe(node='T2_US_MIT', data=data, instance='prod', group='AnalysisOps',
# comments='Just a test by Christoph Paus for Maxim Goncharov.')
#if check:
# print "This is the response from phedexApi.delete: " + response
# sys.exit(1)
#
#print response.read()
#sys.exit(0)
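    # Example for querying recent deletion requests (illustrative; the node
    # name and timestamp below are placeholders)
    #phedexApi = phedexApi(logPath='./')
    #check, response = phedexApi.getDelRequests(node='T2_US_MIT', request_since=1428200998)
    #if not check:
    #    print response.read()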
| mit |
ephes/scikit-learn | sklearn/learning_curve.py | 109 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
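# Illustrative usage sketch; assumes scikit-learn's digits dataset and SVC are
# available in this installation:
# from sklearn.datasets import load_digits
# from sklearn.svm import SVC
# digits = load_digits()
# sizes, train_scores, test_scores = learning_curve(
#     SVC(kernel='linear'), digits.data, digits.target, cv=5)
# train_s, test_s = validation_curve(
#     SVC(), digits.data, digits.target, param_name='gamma',
#     param_range=[1e-4, 1e-3, 1e-2], cv=5)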
| bsd-3-clause |
vijayaganesh/Kanjoos-HackGT | django-webapp/kanjoos/myapp/src/runner/dataset.py | 1 | 3274 | import cv2
import os
import glob
from sklearn.utils import shuffle
import numpy as np
def load_train(train_path, image_size, classes):
images = []
labels = []
img_names = []
cls = []
print('Going to read training images')
for fields in classes:
index = classes.index(fields)
print('Now going to read {} files (Index: {})'.format(fields, index))
path = os.path.join(train_path, fields, '*g')
files = glob.glob(path)
for fl in files:
image = cv2.imread(fl)
image = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)
image = image.astype(np.float32)
image = np.multiply(image, 1.0 / 255.0)
images.append(image)
label = np.zeros(len(classes))
label[index] = 1.0
labels.append(label)
flbase = os.path.basename(fl)
img_names.append(flbase)
cls.append(fields)
images = np.array(images)
labels = np.array(labels)
img_names = np.array(img_names)
cls = np.array(cls)
return images, labels, img_names, cls
class DataSet(object):
def __init__(self, images, labels, img_names, cls):
self._num_examples = images.shape[0]
self._images = images
self._labels = labels
self._img_names = img_names
self._cls = cls
self._epochs_done = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def img_names(self):
return self._img_names
@property
def cls(self):
return self._cls
@property
def num_examples(self):
return self._num_examples
@property
def epochs_done(self):
return self._epochs_done
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# After each epoch we update this
self._epochs_done += 1
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end], self._img_names[start:end], self._cls[start:end]
def read_train_sets(train_path, image_size, classes, validation_size):
class DataSets(object):
pass
data_sets = DataSets()
images, labels, img_names, cls = load_train(train_path, image_size, classes)
images, labels, img_names, cls = shuffle(images, labels, img_names, cls)
if isinstance(validation_size, float):
validation_size = int(validation_size * images.shape[0])
validation_images = images[:validation_size]
validation_labels = labels[:validation_size]
validation_img_names = img_names[:validation_size]
validation_cls = cls[:validation_size]
train_images = images[validation_size:]
train_labels = labels[validation_size:]
train_img_names = img_names[validation_size:]
train_cls = cls[validation_size:]
data_sets.train = DataSet(train_images, train_labels, train_img_names, train_cls)
data_sets.valid = DataSet(validation_images, validation_labels, validation_img_names, validation_cls)
return data_sets
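# Illustrative usage sketch; the directory, image size and class names below
# are placeholders:
# data = read_train_sets('training_data/', 64, ['classA', 'classB'],
#                        validation_size=0.2)
# x_batch, y_batch, names, cls_batch = data.train.next_batch(32)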
| mit |
bromjiri/Presto | predictor/predictor_sklearn.py | 1 | 10590 | import settings
import pandas as pd
import numpy as np
import os
from datetime import timedelta
import predictor.predictor_statistic as stat
import random
import pickle
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
class Stock:
def __init__(self, subject):
input_file = settings.PREDICTOR_STOCK + "/" + subject + ".csv"
self.stock_df = pd.read_csv(input_file, sep=',', index_col='Date')
def create_dict(self, from_date, to_date):
self.stock_ser = self.stock_df['Diff'].loc[from_date:to_date]
# binning
self.stock_ser = self.stock_ser.apply(binning_none)
self.stock_dict = self.stock_ser.dropna().astype(int).to_dict()
def get_dict(self):
return self.stock_dict
def get_stock_dates(self):
return self.stock_ser.index.values
class Sent:
def __init__(self, subject, source):
input_file = settings.PREDICTOR_SENTIMENT + "/" + source + "/" + source + "-sent-" + subject + ".csv"
self.sent_df = pd.read_csv(input_file, sep=',', index_col='Date')
def get_weekend(self, col_name, stock_dates):
weekend_df = np.round(self.sent_df, 2)
aggreg = 0
days = 1
for idx, row in weekend_df.iterrows():
value = row[col_name]
date = pd.to_datetime(idx)
date_plus = date + timedelta(days=1)
if str(date_plus.date()) not in stock_dates:
# print("weekend")
value += aggreg
aggreg = value
days += 1
else:
total = value + aggreg
mean = total / days
aggreg = 0
days = 1
weekend_df.set_value(idx, col_name, mean)
# print(date.date(), row[col_name], value)
return np.round(weekend_df[col_name].diff().loc[stock_dates], 2)
def create_dict(self, precision, method, from_date, to_date, stock_dates, binning):
sentiment_col = "Sent" + precision
sent_ser = self.sent_df[sentiment_col]
if method == "Natural":
sent_ser = sent_ser.diff().loc[from_date:to_date]
elif method == "Friday":
sent_ser = sent_ser.loc[stock_dates].diff()
elif method == "Sunday":
sent_ser = sent_ser.diff().loc[stock_dates]
elif method == "Weekend":
sent_ser = self.get_weekend(sentiment_col, stock_dates)
# binning
std_dev1 = sent_ser.std() / 4
std_dev2 = sent_ser.std()
if binning == 'none':
sent_ser_new = sent_ser.apply(binning_none)
elif binning == 'low':
sent_ser_new = sent_ser.apply(binning_low, args=(std_dev1,))
else:
sent_ser_new = sent_ser.apply(binning_high, args=(std_dev1, std_dev2,))
# print(pd.concat([sent_ser, sent_ser_new], axis=1))
self.sent_dict = sent_ser_new.dropna().astype(int).to_dict()
self.key_list = sorted(self.sent_dict.keys())
def get_dict(self):
return self.sent_dict
def get_features(self, key):
index = self.key_list.index(key)
features = dict()
features['d1'] = self.sent_dict[self.key_list[index-3]]
features['d2'] = self.sent_dict[self.key_list[index-2]]
features['d3'] = self.sent_dict[self.key_list[index-1]]
return features
def binning_none(row):
if row > 0:
return 4
elif row < 0:
return 0
else:
return row
def binning_low(row, std_dev1):
if row > std_dev1:
return 4
elif row < std_dev1 and row > -std_dev1:
return 2
elif row < -std_dev1:
return 0
else:
return row
def binning_high(row, std_dev1, std_dev2):
if row > std_dev2:
return 4
elif row < std_dev2 and row > std_dev1:
return 3
elif row < std_dev1 and row > -std_dev1:
return 2
elif row < -std_dev1 and row > -std_dev2:
return 1
elif row < -std_dev2:
return 0
else:
return row
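# Worked example of the high-resolution binning (threshold values are made up):
# with std_dev1 = 0.25 and std_dev2 = 1.0,
#   binning_high( 1.3, 0.25, 1.0) -> 4
#   binning_high( 0.5, 0.25, 1.0) -> 3
#   binning_high( 0.1, 0.25, 1.0) -> 2
#   binning_high(-0.5, 0.25, 1.0) -> 1
#   binning_high(-1.3, 0.25, 1.0) -> 0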
def run_one(source, subject, precision, method, from_date, to_date, binning, filename_nltk, filename_skl, filename_lr):
# stock dataframe
stock = Stock(subject)
stock.create_dict(from_date, to_date)
stock_dict = stock.get_dict()
# print(sorted(stock_dict.items()))
indexes = ["djia", "snp", "nasdaq"]
# if subject in indexes:
# subject = "the"
# sentiment dataframe
sent = Sent(subject, source)
sent.create_dict(precision, method, from_date, to_date, stock.get_stock_dates(), binning)
# print(sorted(sent.get_dict().items()))
# features
features_list = list()
for key in sorted(stock_dict)[3:]:
features = sent.get_features(key)
features_list.append([features, stock_dict[key]])
# print([key, sorted(features.items()), stock_dict[key]])
features_list_pos = list()
features_list_neg = list()
for feature in features_list:
if feature[1] == 0:
features_list_neg.append(feature)
else:
features_list_pos.append(feature)
statistic = stat.Statistic(source, subject, precision, method, binning)
# print(len(features_list), len(features_list_pos), len(features_list_neg))
max_half = min(len(features_list_pos), len(features_list_neg))
train_border = int(max_half * 4 / 5)
# print(train_border, max_half)
# exit()
if pickle_switch:
cycles = 1
else:
cycles = 50
for x in range(0, cycles):
random.shuffle(features_list_pos)
random.shuffle(features_list_neg)
trainfeats = features_list_pos[:train_border] + features_list_neg[:train_border]
testfeats = features_list_pos[train_border:max_half] + features_list_neg[train_border:max_half]
# print(len(trainfeats), len(testfeats))
# print(trainfeats)
X = list()
y = list()
feats = trainfeats + testfeats
for feat in feats:
X.append(feat[0])
y.append(feat[1])
# vectorize input
v = DictVectorizer(sparse=False)
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train_vect = v.fit_transform(X_train)
X_test_vect = v.transform(X_test)
# instantiate model
logreg = LogisticRegression()
# fit model
logreg.fit(X_train_vect, y_train)
# pickle
if pickle_switch:
os.makedirs('pickled', exist_ok=True)
logreg_f = open("pickled/tesla_logreg.pickle", "wb")
pickle.dump(logreg, logreg_f)
logreg_f.close()
vector_f = open("pickled/tesla_vector.pickle", "wb")
pickle.dump(v, vector_f)
vector_f.close()
# make class predictions for the testing set
y_pred_class = logreg.predict(X_test_vect)
#################
# get the outputs
lr_output = dict()
lr_output['accuracy'] = metrics.accuracy_score(y_test, y_pred_class) * 100
# print(lr_output['accuracy'])
confusion = metrics.confusion_matrix(y_test, y_pred_class)
# print('True:', y_test)
# print('False:', y_pred_class)
# print(confusion)
TP = confusion[1, 1]
TN = confusion[0, 0]
FP = confusion[0, 1]
FN = confusion[1, 0]
# exit()
lr_output['pos_prec'] = TP / float(TP + FP) * 100
lr_output['neg_prec'] = TN / float(TN + FN) * 100
lr_output['pos_rec'] = TP / float(TP + FN) * 100
lr_output['neg_rec'] = TN / float(TN + FP) * 100
# nlt_output, skl_output = cls.train(trainfeats, testfeats, nlt=nltk_run, skl=sklearn_run)
# print(nlt_output['most1'])
# if nltk_run:
# statistic.add_nltk(nlt_output)
# if sklearn_run:
# statistic.add_skl(skl_output)
if lr_run:
statistic.add_lr(lr_output)
if nltk_run:
statistic.mean_nltk(cycles)
statistic.print_nltk()
# statistic.write_nltk(filename_nltk)
if sklearn_run:
statistic.mean_skl(cycles)
statistic.print_skl()
statistic.print_stddev()
# statistic.write_skl(filename_skl)
if lr_run:
statistic.mean_lr(cycles)
statistic.print_lr()
# statistic.write_lr(filename_lr)
nltk_run = False
sklearn_run = False
lr_run = True
pickle_switch = False
from_date = '2016-11-01'
to_date = '2017-08-31'
source = "stwits"
# binnings = ['none', 'low', 'high']
binnings = ['none']
# subjects = ["coca-cola", "mcdonalds", "microsoft", "netflix", "nike", "samsung", "tesla", "djia", "snp", "nasdaq"]
subjects = ["tesla"]
# precisions = ["0.6", "0.8", "1.0"]
precisions = ["0.6"]
# methods = ["Friday", "Natural", "Weekend"]
methods = ["Friday"]
for subject in subjects:
folder = settings.PREDICTOR_PREDICTION + '/' + source + '/' + subject + '/'
os.makedirs(folder, exist_ok=True)
filename_nltk = folder + source + '-prediction-' + subject + "-nltk.csv"
filename_skl = folder + source + '-prediction-' + subject + "-skl.csv"
filename_lr = folder + source + '-prediction-' + subject + "-lr.csv"
# if nltk_run:
# open(filename_nltk, 'w').close()
#
# if sklearn_run:
# open(filename_skl, 'w').close()
if lr_run:
open(filename_lr, 'w').close()
for method in methods:
# if nltk_run:
# f = open(filename_nltk, 'a')
# f.write(source + ", " + subject + ", " + method + ", NLTK\n")
# f.write("precision, binning, accuracy, pos_prec, neg_prec, pos_rec, neg_rec, d1, d2, d3\n")
# f.close()
#
# if sklearn_run:
# f = open(filename_skl, 'a')
# f.write(source + ", " + subject + ", " + method + ", SKL\n")
# f.write("precision, binning, mnb, bnb, lr, lsvc, nsvc, voted\n")
# f.close()
# if lr_run:
# f = open(filename_lr, 'a')
# f.write(source + ", " + subject + ", " + method + ", LR\n")
# f.write("precision, binning, accuracy, pos_prec, neg_prec, pos_rec, neg_rec\n")
# f.close()
for precision in precisions:
for binning in binnings:
# print(source, subject, precision, method)
run_one(source, subject, precision, method, from_date, to_date, binning, filename_nltk, filename_skl, filename_lr)
| mit |
lensacom/sparkit-learn | splearn/base.py | 2 | 1701 | # -*- coding: utf-8 -*-
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.metrics import accuracy_score
class SparkBroadcasterMixin(object):
# TODO: consider caching in case of streaming
def broadcast(self, func, context):
bcvars = {name: context.broadcast(getattr(self, name))
for name in self.__transient__}
def func_wrapper(*args, **kwargs):
for k, v in bcvars.items():
setattr(func.__self__, k, v.value)
return func(*args, **kwargs)
return func_wrapper
class SparkBaseEstimator(BaseEstimator):
pass
class SparkClassifierMixin(ClassifierMixin):
"""Mixin class for all classifiers in sparkit-learn."""
def score(self, Z):
X, y, w = Z[:, 'X'], Z[:, 'y'], None
if 'w' in Z.columns:
w = Z[:, 'w']
return accuracy_score(y.toarray(),
self.predict(X).toarray(),
sample_weight=w)
class SparkTransformerMixin(TransformerMixin):
"""Mixin class for all transformers in sparkit-learn."""
def fit_transform(self, Z, **fit_params):
"""Fit to data, then transform it.
Fits transformer to Z with optional parameters fit_params
and returns a transformed version of Z.
Parameters
----------
Z : ArrayRDD or DictRDD
Training set.
Returns
-------
Z_new : ArrayRDD or DictRDD
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible
return self.fit(Z, **fit_params).transform(Z)
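# Illustrative sketch of fit_transform on a distributed block RDD; `sc` (a
# SparkContext), the ArrayRDD wrapper and the concrete transformer class are
# assumptions:
# from splearn.rdd import ArrayRDD
# Z = ArrayRDD(sc.parallelize(X, 4))
# Z_new = SomeSparkTransformer().fit_transform(Z)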
| apache-2.0 |
3quarterstack/simple_blog | djangoappengine/db/base.py | 18 | 12838 | import datetime
import decimal
import logging
import os
import shutil
from django.db.utils import DatabaseError
from google.appengine.api.datastore import Delete, Query
from google.appengine.api.datastore_errors import BadArgumentError, \
BadValueError
from google.appengine.api.datastore_types import Blob, Key, Text, \
ValidateInteger
from google.appengine.api.namespace_manager import set_namespace
from google.appengine.ext.db.metadata import get_kinds, get_namespaces
from djangotoolbox.db.base import (
NonrelDatabaseClient,
NonrelDatabaseFeatures,
NonrelDatabaseIntrospection,
NonrelDatabaseOperations,
NonrelDatabaseValidation,
NonrelDatabaseWrapper)
from djangotoolbox.db.utils import decimal_to_string
from ..boot import DATA_ROOT
from ..utils import appid, on_production_server
from .creation import DatabaseCreation
from .stubs import stub_manager
DATASTORE_PATHS = {
'datastore_path': os.path.join(DATA_ROOT, 'datastore'),
'blobstore_path': os.path.join(DATA_ROOT, 'blobstore'),
#'rdbms_sqlite_path': os.path.join(DATA_ROOT, 'rdbms'),
'prospective_search_path': os.path.join(DATA_ROOT, 'prospective-search'),
}
def key_from_path(db_table, value):
"""
Workaround for GAE choosing not to validate integer ids when
creating keys.
TODO: Should be removed if it gets fixed.
"""
if isinstance(value, (int, long)):
ValidateInteger(value, 'id')
return Key.from_path(db_table, value)
def get_datastore_paths(options):
paths = {}
for key, path in DATASTORE_PATHS.items():
paths[key] = options.get(key, path)
return paths
def destroy_datastore(paths):
"""Destroys the appengine datastore at the specified paths."""
for path in paths.values():
if not path:
continue
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError, error:
if error.errno != 2:
logging.error("Failed to clear datastore: %s" % error)
class DatabaseFeatures(NonrelDatabaseFeatures):
# GAE only allow strictly positive integers (and strings) to be
# used as key values.
allows_primary_key_0 = False
# Anything that results in a something different than a positive
# integer or a string cannot be directly used as a key on GAE.
# Note that DecimalField values are encoded as strings, so can be
# used as keys.
# With some encoding, we could allow most fields to be used as a
# primary key, but for now only mark what can and what cannot be
# safely used.
supports_primary_key_on = \
NonrelDatabaseFeatures.supports_primary_key_on - set((
'FloatField', 'DateField', 'DateTimeField', 'TimeField',
'BooleanField', 'NullBooleanField', 'TextField', 'XMLField'))
class DatabaseOperations(NonrelDatabaseOperations):
compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'
# Date used to store times as datetimes.
# TODO: Use just date()?
DEFAULT_DATE = datetime.date(1970, 1, 1)
# Time used to store dates as datetimes.
DEFAULT_TIME = datetime.time()
def sql_flush(self, style, tables, sequences, allow_cascade=False):
self.connection.flush()
return []
def value_to_db_auto(self, value):
"""
New keys generated by the GAE datastore hold longs.
"""
if value is None:
return None
return long(value)
def value_for_db(self, value, field, lookup=None):
"""
We'll simulate `startswith` lookups with two inequalities:
property >= value and property <= value + u'\ufffd',
and need to "double" the value before passing it through the
actual datastore conversions.
"""
super_value_for_db = super(DatabaseOperations, self).value_for_db
if lookup == 'startswith':
return [super_value_for_db(value, field, lookup),
super_value_for_db(value + u'\ufffd', field, lookup)]
return super_value_for_db(value, field, lookup)
def _value_for_db(self, value, field, field_kind, db_type, lookup):
"""
        The GAE datastore may only store a restricted set of Python types;
        for some cases it has its own types like Key, Text or Blob.
TODO: Consider moving empty list handling here (from insert).
"""
# Store Nones as Nones to handle nullable fields, even keys.
if value is None:
return None
# Parent can handle iterable fields and Django wrappers.
value = super(DatabaseOperations, self)._value_for_db(
value, field, field_kind, db_type, lookup)
# Convert decimals to strings preserving order.
if field_kind == 'DecimalField':
value = decimal_to_string(
value, field.max_digits, field.decimal_places)
# Create GAE db.Keys from Django keys.
# We use model's table name as key kind (the table of the model
# of the instance that the key identifies, for ForeignKeys and
# other relations).
if db_type == 'key':
# value = self._value_for_db_key(value, field_kind)
try:
value = key_from_path(field.model._meta.db_table, value)
except (BadArgumentError, BadValueError,):
raise DatabaseError("Only strings and positive integers "
"may be used as keys on GAE.")
# Store all strings as unicode, use db.Text for longer content.
elif db_type == 'string' or db_type == 'text':
if isinstance(value, str):
value = value.decode('utf-8')
if db_type == 'text':
value = Text(value)
# Store all date / time values as datetimes, by using some
# default time or date.
elif db_type == 'date':
value = datetime.datetime.combine(value, self.DEFAULT_TIME)
elif db_type == 'time':
value = datetime.datetime.combine(self.DEFAULT_DATE, value)
# Store BlobField, DictField and EmbeddedModelField values as Blobs.
elif db_type == 'bytes':
value = Blob(value)
return value
def _value_from_db(self, value, field, field_kind, db_type):
"""
Undoes conversions done in value_for_db.
"""
# We could have stored None for a null field.
if value is None:
return None
# All keys were converted to the Key class.
if db_type == 'key':
assert isinstance(value, Key), \
"GAE db.Key expected! Try changing to old storage, " \
"dumping data, changing to new storage and reloading."
assert value.parent() is None, "Parents are not yet supported!"
value = value.id_or_name()
# value = self._value_from_db_key(value, field_kind)
# Always retrieve strings as unicode (old datasets may
# contain non-unicode strings).
elif db_type == 'string' or db_type == 'text':
if isinstance(value, str):
value = value.decode('utf-8')
else:
value = unicode(value)
# Dates and times are stored as datetimes, drop the added part.
elif db_type == 'date':
value = value.date()
elif db_type == 'time':
value = value.time()
# Convert GAE Blobs to plain strings for Django.
elif db_type == 'bytes':
value = str(value)
# Revert the decimal-to-string encoding.
if field_kind == 'DecimalField':
value = decimal.Decimal(value)
return super(DatabaseOperations, self)._value_from_db(
value, field, field_kind, db_type)
# def _value_for_db_key(self, value, field_kind):
# """
# Converts values to be used as entity keys to strings,
# trying (but not fully succeeding) to preserve comparisons.
# """
# # Bools as positive integers.
# if field_kind == 'BooleanField':
# value = int(value) + 1
# # Encode floats as strings.
# elif field_kind == 'FloatField':
# value = self.value_to_db_decimal(
# decimal.Decimal(value), None, None)
# # Integers as strings (string keys sort after int keys, so
# # all need to be encoded to preserve comparisons).
# elif field_kind in ('IntegerField', 'BigIntegerField',
# 'PositiveIntegerField', 'PositiveSmallIntegerField',
# 'SmallIntegerField'):
# value = self.value_to_db_decimal(
# decimal.Decimal(value), None, 0)
# return value
# def value_from_db_key(self, value, field_kind):
# """
# Decodes value previously encoded in a key.
# """
# if field_kind == 'BooleanField':
# value = bool(value - 1)
# elif field_kind == 'FloatField':
# value = float(value)
# elif field_kind in ('IntegerField', 'BigIntegerField',
# 'PositiveIntegerField', 'PositiveSmallIntegerField',
# 'SmallIntegerField'):
# value = int(value)
# return value
class DatabaseClient(NonrelDatabaseClient):
pass
class DatabaseValidation(NonrelDatabaseValidation):
pass
class DatabaseIntrospection(NonrelDatabaseIntrospection):
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the
database.
"""
return [kind.key().name() for kind in Query(kind='__kind__').Run()]
class DatabaseWrapper(NonrelDatabaseWrapper):
def __init__(self, *args, **kwds):
super(DatabaseWrapper, self).__init__(*args, **kwds)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.validation = DatabaseValidation(self)
self.introspection = DatabaseIntrospection(self)
options = self.settings_dict
self.remote_app_id = options.get('REMOTE_APP_ID', appid)
self.domain = options.get('DOMAIN', 'appspot.com')
self.remote_api_path = options.get('REMOTE_API_PATH', None)
self.secure_remote_api = options.get('SECURE_REMOTE_API', True)
remote = options.get('REMOTE', False)
if on_production_server:
remote = False
if remote:
stub_manager.setup_remote_stubs(self)
else:
stub_manager.setup_stubs(self)
def flush(self):
"""
Helper function to remove the current datastore and re-open the
stubs.
"""
if stub_manager.active_stubs == 'remote':
import random
import string
code = ''.join([random.choice(string.ascii_letters)
for x in range(4)])
print "\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
print "Warning! You're about to delete the *production* datastore!"
print "Only models defined in your INSTALLED_APPS can be removed!"
print "If you want to clear the whole datastore you have to use " \
"the datastore viewer in the dashboard. Also, in order to " \
"delete all unneeded indexes you have to run appcfg.py " \
"vacuum_indexes."
print "In order to proceed you have to enter the following code:"
print code
response = raw_input("Repeat: ")
if code == response:
print "Deleting..."
delete_all_entities()
print "Datastore flushed! Please check your dashboard's " \
"datastore viewer for any remaining entities and " \
"remove all unneeded indexes with appcfg.py " \
"vacuum_indexes."
else:
print "Aborting."
exit()
elif stub_manager.active_stubs == 'test':
stub_manager.deactivate_test_stubs()
stub_manager.activate_test_stubs(self)
else:
destroy_datastore(get_datastore_paths(self.settings_dict))
stub_manager.setup_local_stubs(self)
def delete_all_entities():
for namespace in get_namespaces():
set_namespace(namespace)
for kind in get_kinds():
if kind.startswith('__'):
continue
while True:
data = Query(kind=kind, keys_only=True).Get(200)
if not data:
break
Delete(data)
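# Note on the loop above: entities are removed in batches of 200 keys per
# Query(...).Get(200) call, and kinds whose names start with '__' (GAE's
# internal statistics kinds) are skipped.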
| mit |
alasdairtran/mclearn | projects/jakub/learning_curve/uncertainty_curve.py | 2 | 1191 | import json
import sys
import numpy as np
import sklearn.gaussian_process
# Import splitter
sys.path.insert(1, '..')
import splitter
TRAINING_SAMPLES_NUM = 1000000
TESTING_SAMPLES_NUM = 1000
MAX_GP = 3000
STEP = 100
ALPHA = .002
LENGTH_SCALE = 1
def perform_gp(train_X, train_y, test_X):
kernel = sklearn.gaussian_process.kernels.RBF(
length_scale=LENGTH_SCALE)
gp = sklearn.gaussian_process.GaussianProcessRegressor(
kernel=kernel,
alpha=ALPHA,
copy_X_train=False)
gp.fit(train_X, train_y)
_, sigmas = gp.predict(test_X, return_std=True)
return np.mean(sigmas)
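# perform_gp returns the mean predictive standard deviation over the fixed
# test set; main() below uses it as the uncertainty measure plotted against
# the number of training samples.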
def main(path_in, path_out):
data = splitter.load(path_in)
(train_X, train_y), (test_X, test_y) \
= splitter.split(data, TRAINING_SAMPLES_NUM, TESTING_SAMPLES_NUM)
gp_x = list(range(STEP, MAX_GP+1, STEP))
gp_y = []
for i, n in enumerate(gp_x):
print('Starting GP', i + 1)
gp_y.append(perform_gp(train_X[:n], train_y[:n], test_X))
with open(path_out, 'w') as f:
json.dump({
'gp_x': gp_x,
'gp_y': gp_y,
}, f)
if __name__ == '__main__':
main(*sys.argv[1:3])
| bsd-3-clause |
Orpine/py-R-FCN | lib/datasets/pascal_voc.py | 11 | 14217 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import cPickle
import subprocess
import uuid
from voc_eval import voc_eval
from fast_rcnn.config import cfg
class pascal_voc(imdb):
def __init__(self, image_set, year, devkit_path=None):
imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.selective_search_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'use_diff' : False,
'matlab_eval' : False,
'rpn_file' : None,
'min_size' : 2}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print 'loading {}'.format(filename)
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = cPickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
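    # Note on the column reorder above: the precomputed .mat boxes are assumed
    # to be stored as (y1, x1, y2, x2) with 1-based MATLAB indexing, so they
    # are swapped to (x1, y1, x2, y2) and shifted to 0-based before use.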
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False,
'seg_areas' : seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
path = os.path.join(
self._devkit_path,
'results',
'VOC' + self._year,
'Main',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in xrange(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
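    # Each line written above follows the VOCdevkit detection format:
    #   <image id> <confidence> <xmin> <ymin> <xmax> <ymax>
    # with pixel coordinates converted back to 1-based indexing.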
def _do_python_eval(self, output_dir = 'output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
        use_07_metric = int(self._year) < 2010
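        # (VOC07 uses the 11-point interpolated AP; from 2010 onwards the
        # metric is the area under the full precision/recall curve.)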
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print '-----------------------------------------------------'
print 'Computing results with the official MATLAB eval code.'
print '-----------------------------------------------------'
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
from datasets.pascal_voc import pascal_voc
d = pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed; embed()
| mit |
roxyboy/scikit-learn | sklearn/utils/__init__.py | 131 | 14185 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
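# Illustrative usage (not part of the original module):
#   >>> from scipy.sparse import csr_matrix
#   >>> safe_mask(csr_matrix(np.eye(3)), np.array([True, False, True]))
#   array([0, 2])
# For a dense array the boolean mask would be returned unchanged.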
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
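# Illustrative usage (not part of the original module): for a plain list,
# safe_indexing(['a', 'b', 'c'], [2, 0]) returns ['c', 'a']; numpy arrays with
# integer indices take the faster X.take(indices, axis=0) path instead.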
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
    The last slice may contain fewer than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
ephes/scikit-learn | examples/svm/plot_svm_margin.py | 315 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
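    # The geometric margin of a linear SVM is 1 / ||w||, which is what the
    # expression above computes; the two dashed lines therefore pass through
    # the support vectors on either side of the decision boundary.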
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/svm/base.py | 155 | 36018 | from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin, ChangedBehaviorWarning
from ..preprocessing import LabelEncoder
from ..multiclass import _ovr_decision_function
from ..utils import check_array, check_random_state, column_or_1d
from ..utils import ConvergenceWarning, compute_class_weight, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class1:
sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1)
+ safe_sparse_dot(alpha2, sv2))
return coef
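# For n_class classes the list returned above holds n_class * (n_class - 1) / 2
# primal vectors, ordered (0 vs 1), (0 vs 2), ..., (1 vs 2), ... -- one per
# one-vs-one classifier.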
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, impl))
# FIXME Remove gamma=0.0 support in 0.18
if gamma == 0:
msg = ("gamma=%s has been deprecated in favor of "
"gamma='%s' as of 0.17. Backward compatibility"
" for gamma=%s will be removed in %s")
invalid_gamma = 0.0
warnings.warn(msg % (invalid_gamma, "auto", invalid_gamma, "0.18"),
DeprecationWarning)
self._impl = impl
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
kernel = self.kernel
return kernel == "precomputed" or callable(kernel)
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
# FIXME remove (self.gamma == 0) in 0.18
if (self.kernel in ['poly', 'rbf']) and ((self.gamma == 0) or
(self.gamma == 'auto')):
# if custom gamma is not provided ...
self._gamma = 1.0 / X.shape[1]
elif self.gamma == 'auto':
self._gamma = 0.0
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return column_or_1d(y, warn=True).astype(np.float64)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn('Solver terminated early (max_iter=%i).'
' Consider pre-processing your data with'
' StandardScaler or MinMaxScaler.'
% self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size / n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
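    # The sparse path reassembles the dual coefficients into a
    # (n_classes - 1, n_SV) CSR matrix here (or (1, n_SV) for regression),
    # mirroring the layout produced by the dense libsvm fit.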
def predict(self, X):
"""Perform regression on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train].
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if hasattr(kernel, '__call__'):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
@property
def coef_(self):
if self.kernel != 'linear':
raise ValueError('coef_ is only available when using a '
'linear kernel')
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
# sparse matrix do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
"""ABC for LibSVM-based classifiers."""
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu,
shrinking, probability, cache_size, class_weight, verbose,
max_iter, decision_function_shape, random_state):
self.decision_function_shape = decision_function_shape
super(BaseSVC, self).__init__(
impl=impl, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
random_state=random_state)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
% len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes)
"""
dec = self._decision_function(X)
if self.decision_function_shape is None and len(self.classes_) > 2:
warnings.warn("The decision_function_shape default value will "
"change from 'ovo' to 'ovr' in 0.18. This will change "
"the shape of the decision function returned by "
"SVC.", ChangedBehaviorWarning)
if self.decision_function_shape == 'ovr':
return _ovr_decision_function(dec < 0, dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError("predict_proba is not available when "
" probability=False")
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError("predict_proba is not available when fitted "
"with probability=False")
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
self.support_vectors_)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
# nested dicts containing level 1: available loss functions,
    # level2: available penalties for the given loss function,
    # level3: whether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
# FIXME loss.lower() --> loss in 0.18
_solver_pen = _solver_type_dict.get(loss.lower(), None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
        # FIXME penalty.lower() --> penalty in 0.18
_solver_dual = _solver_pen.get(penalty.lower(), None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s' "
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
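# Illustrative example (not part of the original module): the default LinearSVC
# settings multi_class='ovr', penalty='l2', loss='squared_hinge', dual=True map
# to liblinear solver number 1 in the table above.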
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
random_state=None, multi_class='ovr',
loss='logistic_regression', epsilon=0.1):
"""Used by Logistic Regression (and CV) and LinearSVC.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X
C : float
Inverse of cross-validation parameter. Lower the C, the more
the penalization.
fit_intercept : bool
        Whether or not to fit the intercept, that is, to add an intercept
term to the decision function.
intercept_scaling : float
LibLinear internally penalizes the intercept and this term is subject
to regularization just like the other terms of the feature vector.
        In order to avoid this, one should increase the intercept_scaling,
        such that the feature vector becomes [x, intercept_scaling].
class_weight : {dict, 'balanced'}, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : str, {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
        Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
Number of iterations.
tol : float
Stopping condition.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
multi_class : str, {'ovr', 'crammer_singer'}
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        because it is consistent, it is seldom used in practice, rarely leads
        to better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
        'epsilon_insensitive', 'squared_epsilon_insensitive'}
The loss function used to fit the model.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
Returns
-------
coef_ : ndarray, shape (n_features, n_features + 1)
        The coefficient vector obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : int
Maximum number of iterations run across all classes.
"""
# FIXME Remove case insensitivity in 0.18 ---------------------
loss_l, penalty_l = loss.lower(), penalty.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
if (not loss.islower()) and loss_l not in ('l1', 'l2'):
warnings.warn(msg % (loss, loss_l, "0.18"),
DeprecationWarning)
if not penalty.islower():
warnings.warn(msg.replace("loss", "penalty")
% (penalty, penalty_l, "0.18"),
DeprecationWarning)
# -------------------------------------------------------------
# FIXME loss_l --> loss in 0.18
if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
class_weight_ = compute_class_weight(class_weight, classes_, y)
else:
class_weight_ = np.empty(0, dtype=np.float)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print('[LibLinear]', end='')
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError("Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
epsilon)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_ = max(n_iter_)
if n_iter_ >= max_iter and verbose > 0:
warnings.warn("Liblinear failed to converge, increase "
"the number of iterations.", ConvergenceWarning)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.
return coef_, intercept_, n_iter_
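# When fit_intercept is True the intercept is modelled as an extra feature
# scaled by intercept_scaling (the `bias` passed to liblinear), which is why
# the last column of raw_coef_ is split off and rescaled above.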
| bsd-3-clause |
edx/edx-platform | openedx/features/content_type_gating/block_transformers.py | 4 | 4525 | """
Content Type Gate Transformer implementation.
Limits access for certain users to certain types of content.
"""
from django.conf import settings
from lms.djangoapps.course_blocks.transformers.user_partitions import UserPartitionTransformer
from openedx.core.djangoapps.content.block_structure.transformer import BlockStructureTransformer
from openedx.features.content_type_gating.helpers import CONTENT_GATING_PARTITION_ID
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
class ContentTypeGateTransformer(BlockStructureTransformer):
"""
A transformer that adds a partition condition for all graded content
so that the content is only visible to verified users.
This transformer requires that the UserPartitionTransformer also be included in your transformer list.
"""
WRITE_VERSION = 1
READ_VERSION = 1
@classmethod
def name(cls):
"""
Unique identifier for the transformer's class;
same identifier used in setup.py.
"""
return "content_type_gate"
@classmethod
def collect(cls, block_structure):
"""
Collects any information that's necessary to execute this
transformer's transform method.
"""
block_structure.request_xblock_fields('group_access', 'graded', 'has_score', 'weight')
def _set_contains_gated_content_on_parents(self, block_structure, block_key):
"""
        This will recursively set a field on all the parents of a block if one of the problems
        inside it is content gated. `contains_gated_content` can then be used to indicate that
        something in the block's subtree is gated.
"""
if block_structure.get_xblock_field(block_key, 'contains_gated_content'):
return
block_structure.override_xblock_field(block_key, 'contains_gated_content', True)
for parent_block_key in block_structure.get_parents(block_key):
self._set_contains_gated_content_on_parents(block_structure, parent_block_key)
@staticmethod
def _get_block_group_access(block_structure, block_key):
"""
Gets the current group_access value for a block, supporting inheritance when possible.
In order to support inheritance, UserPartitionTransformer must also be used.
"""
# See user_partitions.py for the code that sets this field.
merged_access = block_structure.get_transformer_block_field(
block_key, UserPartitionTransformer, 'merged_group_access', None
)
if merged_access:
# merged_access holds a dictionary of sets, but group_access is a dictionary of lists, so we convert here
# (sets seem like a better format for this, but existing code already expects lists)
current_access = {p: list(g) for (p, g) in merged_access.get_allowed_groups().items()}
else:
            # This fallback code has a bug if UserPartitionTransformer is not being used -- it does not consider
            # inheritance from parent blocks. This is why our class docstring recommends UserPartitionTransformer.
current_access = block_structure.get_xblock_field(block_key, 'group_access')
return current_access or {}
def transform(self, usage_info, block_structure):
if not ContentTypeGatingConfig.enabled_for_enrollment(
user=usage_info.user,
course_key=usage_info.course_key,
):
return
for block_key in block_structure.topological_traversal():
graded = block_structure.get_xblock_field(block_key, 'graded')
has_score = block_structure.get_xblock_field(block_key, 'has_score')
weight_not_zero = block_structure.get_xblock_field(block_key, 'weight') != 0
problem_eligible_for_content_gating = graded and has_score and weight_not_zero
if problem_eligible_for_content_gating:
current_access = self._get_block_group_access(block_structure, block_key)
current_access.setdefault(
CONTENT_GATING_PARTITION_ID,
[settings.CONTENT_TYPE_GATE_GROUP_IDS['full_access']]
)
block_structure.override_xblock_field(block_key, 'group_access', current_access)
if current_access[CONTENT_GATING_PARTITION_ID] == [settings.CONTENT_TYPE_GATE_GROUP_IDS['full_access']]:
self._set_contains_gated_content_on_parents(block_structure, block_key)
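# Minimal sketch (hypothetical values, not edx-platform code): the heart of the
# transform above is a ``setdefault`` on the block's ``group_access`` dict, so a
# partition that an author already configured is never overwritten.
# ``partition_id`` and ``full_access_group`` stand in for
# CONTENT_GATING_PARTITION_ID and the configured full-access group id.
def _example_apply_content_gate(group_access, partition_id, full_access_group):
    """Return a copy of ``group_access`` restricted to full-access users
    unless the content-gating partition is already set on the block."""
    access = dict(group_access or {})
    access.setdefault(partition_id, [full_access_group])
    return access
# _example_apply_content_gate({}, 1, 2)       -> {1: [2]}
# _example_apply_content_gate({1: [3]}, 1, 2) -> {1: [3]}  (existing entry wins)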
| agpl-3.0 |
ephes/scikit-learn | sklearn/grid_search.py | 102 | 36232 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
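# Illustrative sketch (not part of the public API): ``ParameterGrid`` supports
# ``len`` and integer indexing, which is what makes memory-efficient sampling
# without replacement possible in ParameterSampler below.
def _example_parameter_grid_indexing():
    grid = ParameterGrid({'a': [1, 2], 'b': [True, False]})
    assert len(grid) == 4
    # grid[i] is equivalent to list(grid)[i], without materialising the list.
    return [grid[i] for i in range(len(grid))]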
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
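# Illustrative sketch (arbitrary toy data): evaluating a single grid point on
# one train/test split with ``fit_grid_point``. The estimator, parameters and
# split below are made up for demonstration.
def _example_fit_grid_point():
    import numpy as np
    from sklearn.svm import SVC
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    y = rng.randint(0, 2, size=20)
    train, test = np.arange(15), np.arange(15, 20)
    clf = SVC()
    scorer = check_scoring(clf, scoring='accuracy')
    score, parameters, n_test = fit_grid_point(
        X, y, clf, {'C': 1.0}, train, test, scorer, verbose=0)
    return score, parameters, n_test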
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
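# Illustrative sketch: because ``_CVScoreTuple`` sets ``__slots__ = ()``, its
# instances stay as light as a plain namedtuple and cannot grow ad-hoc
# attributes (there is no per-instance ``__dict__`` to put them in).
def _example_cv_score_tuple_is_slotted():
    t = _CVScoreTuple({'C': 1}, 0.9, np.array([0.8, 1.0]))
    try:
        t.extra = 42
    except AttributeError:
        return True    # attribute assignment is blocked, as intended
    return False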
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets: (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
        Dictionary with parameter names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
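# Illustrative sketch (toy data): after ``fit``, the attributes documented in
# the class docstring expose the selected parameters and the score obtained
# for every candidate combination.
def _example_grid_search_attributes():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    search = GridSearchCV(SVC(), {'C': [0.1, 1, 10]}, cv=3)
    search.fit(iris.data, iris.target)
    # one _CVScoreTuple per candidate, plus the best setting and its score
    return search.grid_scores_, search.best_params_, search.best_score_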
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
        Dictionary with parameter names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
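# Illustrative sketch (toy data): a continuous distribution for ``C`` is
# sampled with replacement while the ``kernel`` list is sampled uniformly,
# matching the semantics described in the class docstring.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    param_distributions = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
    search = RandomizedSearchCV(SVC(), param_distributions, n_iter=5,
                                random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_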
| bsd-3-clause |
luo66/scikit-learn | sklearn/ensemble/gradient_boosting.py | 50 | 67625 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._splitter import PresortBestSplitter
from ..tree._criterion import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
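# Illustrative sketch: the constant prediction fitted above is the (scaled)
# log odds of the positive class, e.g. log(3 / 1) for three positives and one
# negative.
def _example_log_odds_prior():
    import numpy as np
    est = LogOddsEstimator()
    est.fit(np.zeros((4, 1)), np.array([1., 1., 1., 0.]))
    return est.prior, np.log(3.0)    # both equal log(3) since scale is 1.0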
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
        y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
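# Illustrative sketch: for least squares the loss is the mean squared error
# and the negative gradient is simply the residual ``y - pred``, as
# implemented above.
def _example_least_squares_loss():
    import numpy as np
    loss = LeastSquaresError(1)
    y = np.array([3.0, -1.0, 2.0])
    pred = np.array([[2.5], [0.0], [2.0]])
    # loss(y, pred) == mean([0.25, 1.0, 0.0]); gradient == [0.5, -1.0, 0.0]
    return loss(y, pred), loss.negative_gradient(y, pred)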
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            # over-predictions (diff <= 0) contribute -(1 - alpha) * diff
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                    (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
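# Illustrative sketch: the negative gradient above is piecewise constant --
# ``alpha`` for under-predictions and ``-(1 - alpha)`` for over-predictions --
# which is what steers the ensemble towards the requested quantile.
def _example_quantile_gradient():
    import numpy as np
    loss = QuantileLossFunction(1, alpha=0.9)
    y = np.array([1.0, 1.0])
    pred = np.array([[0.0], [2.0]])          # one under-, one over-prediction
    return loss.negative_gradient(y, pred)   # -> array([ 0.9, -0.1])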
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Loss functions that do not support probabilities raise a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
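# Illustrative sketch: the Newton-Raphson leaf update above exploits the
# identity ``residual = y - prob`` to evaluate
# sum(w * (y - prob)) / sum(w * prob * (1 - prob)) without storing the
# probabilities explicitly.
def _example_binomial_newton_step():
    import numpy as np
    y = np.array([1.0, 0.0, 1.0])
    prob = np.array([0.7, 0.4, 0.9])
    w = np.ones_like(y)
    residual = y - prob
    numerator = np.sum(w * residual)
    # (y - residual) == prob and (1 - y + residual) == 1 - prob
    denominator = np.sum(w * (y - residual) * (1 - y + residual))
    return numerator / denominator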
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
            return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
                                           logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting (see the sketch in the Examples section below).
Returns
-------
self : object
Returns self.
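        Examples
        --------
        A minimal sketch of a ``monitor`` callable (illustrative only; the
        name ``stop_after_ten`` is made up for this example)::
            def stop_after_ten(i, est, locals_):
                # returning True ends the boosting loop after stage 10
                return i >= 10
            # est.fit(X_train, y_train, monitor=stop_after_ten)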
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in.
In the case of binary classification n_classes is 1.
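        Examples
        --------
        A rough sketch (illustrative only; ``est`` is assumed to be a fitted
        gradient boosting model and ``X`` a matching input array)::
            leaves = est.apply(X)
            # leaves[i, j, k] is the leaf index reached by sample i in the
            # k-th tree of boosting stage j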
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
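    Examples
    --------
    A minimal usage sketch (illustrative only; the resulting accuracy depends
    on the data and parameters)::
        >>> from sklearn.datasets import make_hastie_10_2
        >>> from sklearn.ensemble import GradientBoostingClassifier
        >>> X, y = make_hastie_10_2(random_state=0)
        >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
        ...                                  max_depth=1, random_state=0)
        >>> clf = clf.fit(X[:2000], y[:2000])
        >>> acc = clf.score(X[2000:], y[2000:])  # held-out accuracy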
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples at each
            stage.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
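    Examples
    --------
    A minimal usage sketch (illustrative only)::
        >>> from sklearn.datasets import make_friedman1
        >>> from sklearn.ensemble import GradientBoostingRegressor
        >>> X, y = make_friedman1(n_samples=1200, random_state=0, noise=1.0)
        >>> est = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
        ...                                 max_depth=1, random_state=0, loss='ls')
        >>> est = est.fit(X[:200], y[:200])
        >>> mse = ((est.predict(X[200:]) - y[200:]) ** 2).mean()  # held-out MSE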
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
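        Examples
        --------
        A sketch of tracking held-out error stage by stage (illustrative only;
        ``est``, ``X_test`` and ``y_test`` are assumed to already exist)::
            errors = [((y_test - y_pred) ** 2).mean()
                      for y_pred in est.staged_predict(X_test)]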
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
| bsd-3-clause |
luo66/scikit-learn | sklearn/utils/__init__.py | 79 | 14202 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
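    Examples
    --------
    A small sketch (illustrative only)::
        >>> import numpy as np
        >>> from scipy.sparse import csr_matrix
        >>> from sklearn.utils import safe_mask
        >>> X_sparse = csr_matrix(np.eye(3))
        >>> mask = np.array([True, False, True])
        >>> safe_mask(X_sparse, mask)
        array([0, 2])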
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
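    Examples
    --------
    A small sketch (illustrative only)::
        >>> import numpy as np
        >>> from sklearn.utils import safe_indexing
        >>> X = np.arange(10).reshape(5, 2)
        >>> safe_indexing(X, np.array([0, 3]))
        array([[0, 1],
               [6, 7]])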
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
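    Examples
    --------
    A small sketch (illustrative only)::
        >>> import numpy as np
        >>> from sklearn.utils import safe_sqr
        >>> safe_sqr(np.array([1, 2, 3]))
        array([1, 4, 9])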
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
switowski/invenio | invenio/modules/indexer/tokenizers/BibIndexFiletypeTokenizer.py | 12 | 2411 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibIndexFiletypeTokenizer: 'tokenizes' for file extensions.
Tokenizer is adapted to work with recjson and its get_record function.
"""
from invenio.modules.indexer.tokenizers.BibIndexRecJsonTokenizer import BibIndexRecJsonTokenizer
class BibIndexFiletypeTokenizer(BibIndexRecJsonTokenizer):
"""
Tokenizes for file extensions.
Tokenizer is adapted to work with recjson and its get_record function.
It accepts as an input a record created by a get_record function:
from invenio.modules.records.api import get_record
record16 = get_record(16)
tokenizer = BibIndexFiletypeTokenizer()
new_words = tokenizer.tokenize(record16)
"""
def __init__(self, stemming_language = None, remove_stopwords = False, remove_html_markup = False, remove_latex_markup = False):
pass
def tokenize(self, record):
"""'record' is a recjson record.
Function uses derived field 'filetypes'
from the record.
           @param record: recjson record
"""
values = []
try:
if 'filetypes' in record:
values = record['filetypes']
except KeyError:
pass
except TypeError:
return []
return values
def tokenize_for_words(self, record):
return self.tokenize(record)
def tokenize_for_pairs(self, record):
return self.tokenize(record)
def tokenize_for_phrases(self, record):
return self.tokenize(record)
def get_tokenizing_function(self, wordtable_type):
return self.tokenize
| gpl-2.0 |
MohammedWasim/scikit-learn | examples/cluster/plot_affinity_propagation.py | 346 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | examples/cluster/plot_affinity_propagation.py | 346 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
mileistone/test | brambox/boxes/annotations/kitti.py | 1 | 4525 | #
# Copyright EAVISE
# Author: Tanguy Ophoff
#
"""
KITTI
-----
"""
from .annotation import *
__all__ = ["KittiAnnotation", "KittiParser"]
class KittiAnnotation(Annotation):
""" KITI image annotation """
def serialize(self):
""" generate a KITTI annotation string """
truncated = 1.0 if self.lost else self.truncated_fraction
class_label = self.class_label if self.class_label != '' else '?'
if self.occluded_fraction >= 0.5:
occluded = 2
elif self.occluded_fraction > 0.0:
occluded = 1
else:
occluded = 0
return f'{class_label} {truncated:.2f} {occluded} -10 {self.x_top_left:.2f} {self.y_top_left:.2f} {self.x_top_left+self.width:.2f} {self.y_top_left+self.height:.2f} -1 -1 -1 -1000 -1000 -1000 -10'
def deserialize(self, string):
""" parse a KITTI annotation string """
elements = string.split()
self.class_label = elements[0] if elements[0] != '?' else ''
self.truncated_fraction = max(float(elements[1]), 0.0)
self.x_top_left = float(elements[4])
self.y_top_left = float(elements[5])
self.width = float(elements[6]) - self.x_top_left
self.height = float(elements[7]) - self.y_top_left
if elements[2] == '1':
self.occluded_fraction = 0.25
elif elements[2] == '2':
self.occluded_fraction = 0.5
else:
self.occluded_fraction = 0.0
class KittiParser(Parser):
"""
This parser can read and write kitti_ annotation files. |br|
Some of the values of this dataset are not present in the brambox annotation objects and are thus not used.
When serializing this format, these values will be set to their default value, as per specification.
================== ================ ===========
Name Number of Values Description
================== ================ ===========
class_label 1 Annotation class_label. In the official dataset this can be one of: |br|
'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare'
truncated_fraction 1 Float in range [0-1] indicating whether object is truncated
occluded_state 1 Integer (0,1,2,3) indicating occlusion state: |br|
0=fully visible, 1=partly occluded, 2=largely occluded, 3=unknown
alpha 1 *[Not used in brambox]* Observation angle of the object
bbox 4 2D bounding box of the image, expressed in pixel coordinates
dimensions 3 *[Not used in brambox]* 3D object dimensions
location 3 *[Not used in brambox]* 3D object location
rotation_y 1 *[Not used in brambox]* Rotation around Y-axis in camera coordinates
================== ================ ===========
Example:
>>> image_000.txt
<class_label> <truncated_fraction> <occluded_state> -10 <bbox_left> <bbox_top> <bbox_right> <bbox_bottom> -1 -1 -1 -1000 -1000 -1000 -10
<class_label> <truncated_fraction> <occluded_state> -10 <bbox_left> <bbox_top> <bbox_right> <bbox_bottom> -1 -1 -1 -1000 -1000 -1000 -10
>>> image_001.txt
<class_label> <truncated_fraction> <occluded_state> -10 <bbox_left> <bbox_top> <bbox_right> <bbox_bottom> -1 -1 -1 -1000 -1000 -1000 -10
<class_label> <truncated_fraction> <occluded_state> -10 <bbox_left> <bbox_top> <bbox_right> <bbox_bottom> -1 -1 -1 -1000 -1000 -1000 -10
<class_label> <truncated_fraction> <occluded_state> -10 <bbox_left> <bbox_top> <bbox_right> <bbox_bottom> -1 -1 -1 -1000 -1000 -1000 -10
Note:
This parser will convert the ``occluded_state`` to an ``occluded_fraction``. |br|
Partly occluded (1) will be converted to a fraction of 0.25 and largely occluded (2) to 0.5.
The other states will be converted to a fraction of 0. |br|
When serializing, all fractions bigger or equal to 0.5 will be converted to largely occluded (2),
fractions between 0.5 and 0 to partly occluded (1) and fractions of 0 will be converted to fully visible (0).
.. _kitti: https://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=2d
"""
parser_type = ParserType.MULTI_FILE
box_type = KittiAnnotation
| mit |
LohithBlaze/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 35 | 15016 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
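# Illustrative sketch (not part of the original test module): the test above
# groups samples by the sign pattern of their features by viewing each row of
# the sign matrix as one fixed-width byte string, so np.unique can treat whole
# rows as single items. A minimal standalone version of that trick, assuming a
# small C-contiguous float64 array:
def _demo_row_uniquing_by_sign():
    import numpy as np
    a = np.array([[1.5, -2.0], [0.5, -0.1], [-3.0, 2.0]])
    signs = np.sign(a)
    # Each row spans signs.strides[0] bytes, so viewing with that itemsize
    # collapses every row into one opaque byte string usable as a unique key.
    row_keys = signs.view(dtype='|S{0}'.format(signs.strides[0]))
    unique_rows, row_index = np.unique(row_keys, return_inverse=True)
    # Rows 0 and 1 share the sign pattern [+, -], so they map to the same
    # entry of unique_rows.
    return unique_rows, row_index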
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
mlflow/mlflow | mlflow/recipes/steps/ingest/__init__.py | 1 | 10689 | import abc
import logging
import os
from pathlib import Path
from mlflow.exceptions import MlflowException
from mlflow.recipes.artifacts import DataframeArtifact
from mlflow.recipes.cards import BaseCard
from mlflow.recipes.step import BaseStep
from mlflow.recipes.step import StepClass
from mlflow.recipes.utils.step import get_pandas_data_profiles
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.utils.file_utils import read_parquet_as_pandas_df
from mlflow.recipes.steps.ingest.datasets import (
ParquetDataset,
DeltaTableDataset,
SparkSqlDataset,
CustomDataset,
)
from typing import Dict, Any
import pandas as pd
_logger = logging.getLogger(__name__)
class BaseIngestStep(BaseStep, metaclass=abc.ABCMeta):
_DATASET_FORMAT_SPARK_TABLE = "spark_table"
_DATASET_FORMAT_DELTA = "delta"
_DATASET_FORMAT_PARQUET = "parquet"
_DATASET_PROFILE_OUTPUT_NAME = "dataset_profile.html"
_STEP_CARD_OUTPUT_NAME = "card.pkl"
_SUPPORTED_DATASETS = [
ParquetDataset,
DeltaTableDataset,
SparkSqlDataset,
# NB: The custom dataset is deliberately listed last as a catch-all for any
# format not matched by the datasets above. When mapping a format to a dataset,
# datasets are explored in the listed order
CustomDataset,
]
def _validate_and_apply_step_config(self):
dataset_format = self.step_config.get("using")
if not dataset_format:
raise MlflowException(
message=(
"Dataset format must be specified via the `using` key within the `ingest`"
" section of recipe.yaml"
),
error_code=INVALID_PARAMETER_VALUE,
)
if self.step_class() == StepClass.TRAINING:
self.target_col = self.step_config.get("target_col")
if self.target_col is None:
raise MlflowException(
"Missing target_col config in recipe config.",
error_code=INVALID_PARAMETER_VALUE,
)
if (
"positive_class" not in self.step_config
and self.step_config["recipe"] == "classification/v1"
):
raise MlflowException(
"`positive_class` must be specified for classification/v1 recipes.",
error_code=INVALID_PARAMETER_VALUE,
)
self.positive_class = self.step_config.get("positive_class")
for dataset_class in BaseIngestStep._SUPPORTED_DATASETS:
if dataset_class.handles_format(dataset_format):
self.dataset = dataset_class.from_config(
dataset_config=self.step_config,
recipe_root=self.recipe_root,
)
break
else:
raise MlflowException(
message=f"Unrecognized dataset format: {dataset_format}",
error_code=INVALID_PARAMETER_VALUE,
)
self.skip_data_profiling = self.step_config.get("skip_data_profiling", False)
def _run(self, output_directory: str) -> BaseCard:
dataset_dst_path = os.path.abspath(os.path.join(output_directory, self.dataset_output_name))
self.dataset.resolve_to_parquet(
dst_path=dataset_dst_path,
)
_logger.debug("Successfully stored data in parquet format at '%s'", dataset_dst_path)
ingested_df = read_parquet_as_pandas_df(data_parquet_path=dataset_dst_path)
if self.step_class() == StepClass.TRAINING:
if self.target_col not in ingested_df.columns:
raise MlflowException(
f"Target column '{self.target_col}' not found in ingested dataset.",
error_code=INVALID_PARAMETER_VALUE,
)
if self.positive_class is not None:
cardinality = ingested_df[self.target_col].nunique()
if cardinality != 2:
raise MlflowException(
f"Target column '{self.target_col}' must have a cardinality of 2,"
f"found '{cardinality}'.",
error_code=INVALID_PARAMETER_VALUE,
)
ingested_dataset_profile = None
if not self.skip_data_profiling:
_logger.debug("Profiling ingested dataset")
ingested_dataset_profile = get_pandas_data_profiles(
[["Profile of Ingested Dataset", ingested_df]]
)
dataset_profile_path = Path(
str(os.path.join(output_directory, BaseIngestStep._DATASET_PROFILE_OUTPUT_NAME))
)
dataset_profile_path.write_text(ingested_dataset_profile, encoding="utf-8")
_logger.debug(f"Wrote dataset profile to '{dataset_profile_path}'")
schema = pd.io.json.build_table_schema(ingested_df, index=False)
step_card = self._build_step_card(
ingested_dataset_profile=ingested_dataset_profile,
ingested_rows=len(ingested_df),
schema=schema,
data_preview=ingested_df.head(),
dataset_src_location=getattr(self.dataset, "location", None),
dataset_sql=getattr(self.dataset, "sql", None),
)
return step_card
def _build_step_card(
self,
ingested_dataset_profile: str,
ingested_rows: int,
schema: Dict,
data_preview: pd.DataFrame = None,
dataset_src_location: str = None,
dataset_sql: str = None,
) -> BaseCard:
"""
Constructs a step card instance corresponding to the current ingest step state.
        :param ingested_dataset_profile: HTML profile report of the ingested dataset, or ``None``
                                         if data profiling was skipped.
        :param ingested_rows: The number of rows in the ingested dataset.
        :param schema: The table schema of the ingested dataset, as produced by
                       ``pd.io.json.build_table_schema``.
        :param data_preview: A small preview (the first few rows) of the ingested dataset,
                             if available.
:param dataset_src_location: The source location of the dataset
(e.g. '/tmp/myfile.parquet', 's3://mybucket/mypath', ...),
if the dataset is a location-based dataset. Either
``dataset_src_location`` or ``dataset_sql`` must be specified.
:param dataset_sql: The Spark SQL query string that defines the dataset
(e.g. 'SELECT * FROM my_spark_table'), if the dataset is a Spark SQL
dataset. Either ``dataset_src_location`` or ``dataset_sql`` must be
specified.
        :return: A BaseCard instance corresponding to the current ingest step state.
"""
if dataset_src_location is None and dataset_sql is None:
raise MlflowException(
message=(
"Failed to build step card because neither a dataset location nor a"
" dataset Spark SQL query were specified"
),
error_code=INVALID_PARAMETER_VALUE,
)
card = BaseCard(self.recipe_name, self.name)
if not self.skip_data_profiling:
( # Tab #1 -- Ingested dataset profile.
card.add_tab("Data Profile", "{{PROFILE}}").add_pandas_profile(
"PROFILE", ingested_dataset_profile
)
)
# Tab #2 -- Ingested dataset schema.
schema_html = BaseCard.render_table(schema["fields"])
card.add_tab("Data Schema", "{{SCHEMA}}").add_html("SCHEMA", schema_html)
if data_preview is not None:
# Tab #3 -- Ingested dataset preview.
card.add_tab("Data Preview", "{{DATA_PREVIEW}}").add_html(
"DATA_PREVIEW", BaseCard.render_table(data_preview)
)
( # Tab #4 -- Step run summary.
card.add_tab(
"Run Summary",
"{{ INGESTED_ROWS }}"
+ "{{ DATA_SOURCE }}"
+ "{{ EXE_DURATION }}"
+ "{{ LAST_UPDATE_TIME }}",
)
.add_markdown(
name="INGESTED_ROWS",
markdown=f"**Number of rows ingested:** `{ingested_rows}`",
)
.add_markdown(
name="DATA_SOURCE",
markdown=(
f"**Dataset source location:** `{dataset_src_location}`"
if dataset_src_location is not None
else f"**Dataset SQL:** `{dataset_sql}`"
),
)
)
return card
class IngestStep(BaseIngestStep):
_DATASET_OUTPUT_NAME = "dataset.parquet"
def __init__(self, step_config: Dict[str, Any], recipe_root: str):
super().__init__(step_config, recipe_root)
self.dataset_output_name = IngestStep._DATASET_OUTPUT_NAME
@classmethod
def from_recipe_config(cls, recipe_config: Dict[str, Any], recipe_root: str):
ingest_config = recipe_config.get("steps", {}).get("ingest", {})
target_config = {"target_col": recipe_config.get("target_col")}
if "positive_class" in recipe_config:
target_config["positive_class"] = recipe_config.get("positive_class")
return cls(
step_config={
**ingest_config,
**target_config,
**{"recipe": recipe_config.get("recipe")},
},
recipe_root=recipe_root,
)
@property
def name(self) -> str:
return "ingest"
def get_artifacts(self):
return [
DataframeArtifact(
"ingested_data", self.recipe_root, self.name, IngestStep._DATASET_OUTPUT_NAME
)
]
def step_class(self):
return StepClass.TRAINING
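# Illustrative sketch (not part of MLflow): `from_recipe_config` above merges
# the `steps.ingest` block of recipe.yaml with recipe-level settings before the
# step is built. Assuming a hypothetical recipe config (the `location` field is
# illustrative and depends on the dataset type), the merged `step_config` would
# look roughly like this:
def _sketch_ingest_step_config():
    recipe_config = {
        "recipe": "regression/v1",
        "target_col": "fare_amount",
        "steps": {
            "ingest": {"using": "parquet", "location": "./data/sample.parquet"},
        },
    }
    ingest_config = recipe_config.get("steps", {}).get("ingest", {})
    step_config = {
        **ingest_config,
        "target_col": recipe_config.get("target_col"),
        "recipe": recipe_config.get("recipe"),
    }
    # e.g. {'using': 'parquet', 'location': './data/sample.parquet',
    #       'target_col': 'fare_amount', 'recipe': 'regression/v1'}
    return step_config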
class IngestScoringStep(BaseIngestStep):
_DATASET_OUTPUT_NAME = "scoring-dataset.parquet"
def __init__(self, step_config: Dict[str, Any], recipe_root: str):
super().__init__(step_config, recipe_root)
self.dataset_output_name = IngestScoringStep._DATASET_OUTPUT_NAME
@classmethod
def from_recipe_config(cls, recipe_config: Dict[str, Any], recipe_root: str):
step_config = recipe_config.get("steps", {}).get("ingest_scoring", {})
return cls(
step_config=step_config,
recipe_root=recipe_root,
)
@property
def name(self) -> str:
return "ingest_scoring"
def get_artifacts(self):
return [
DataframeArtifact(
"ingested_scoring_data",
self.recipe_root,
self.name,
IngestScoringStep._DATASET_OUTPUT_NAME,
)
]
def step_class(self):
return StepClass.PREDICTION
| apache-2.0 |
jzt5132/scikit-learn | sklearn/cross_validation.py | 47 | 67782 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
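# Illustrative sketch (not part of scikit-learn): KFold._iter_test_indices
# above yields contiguous blocks whose sizes follow the rule in the Notes
# section -- the first n % n_folds folds receive one extra sample. A standalone
# computation of the fold sizes for hypothetical n and n_folds:
def _demo_kfold_fold_sizes(n=10, n_folds=3):
    import numpy as np
    fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=int)
    fold_sizes[:n % n_folds] += 1
    boundaries = np.cumsum(fold_sizes)
    # For n=10, n_folds=3 this gives fold sizes [4, 3, 3] and the test blocks
    # end at indices [4, 7, 10].
    return fold_sizes, boundaries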
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3, shuffle=False, random_state=None):
super(LabelKFold, self).__init__(len(labels), n_folds, shuffle,
random_state)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
        # Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
for i in range(self.n_folds):
yield (self.idxs == i)
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
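# Illustrative sketch (not part of scikit-learn): LabelKFold.__init__ above
# assigns whole labels to folds greedily -- labels are visited from most to
# least frequent and each one goes to the currently lightest fold. A minimal
# standalone version of that assignment for a hypothetical label array:
def _demo_labelkfold_assignment(labels=(0, 0, 0, 1, 1, 2, 2, 2, 2), n_folds=2):
    import numpy as np
    labels = np.asarray(labels)
    unique_labels, inv = np.unique(labels, return_inverse=True)
    counts = np.bincount(inv)
    order = np.argsort(counts)[::-1]           # most frequent labels first
    fold_weights = np.zeros(n_folds)
    label_to_fold = np.zeros(len(unique_labels), dtype=int)
    for label_idx in order:
        lightest = np.argmin(fold_weights)     # always fill the lightest fold
        fold_weights[lightest] += counts[label_idx]
        label_to_fold[label_idx] = lightest
    # For the default labels: label 2 (4 samples) -> fold 0, label 0
    # (3 samples) -> fold 1, label 1 (2 samples) -> fold 1, giving
    # fold weights [4, 5].
    return label_to_fold, fold_weights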
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_folds)``; the last one
    contains the remainder.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
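# Illustrative sketch (not part of scikit-learn): as the comments in
# StratifiedKFold.__init__ explain, each class is split by its own private
# KFold and the per-class test assignments are merged into one test_folds
# array. With shuffling disabled the result is deterministic:
def _demo_stratified_kfold_assignment():
    import numpy as np
    y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    skf = StratifiedKFold(y, n_folds=2)
    # Both classes are cut in half independently, so test_folds comes out as
    # [0 0 1 1 0 0 1 1]: every fold holds two samples of each class.
    return skf.test_folds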
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
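# Illustrative sketch (not part of scikit-learn): _validate_shuffle_split above
# turns float proportions and integer counts into concrete (n_train, n_test)
# pairs -- a float test_size is rounded up with ceil(), a float train_size is
# rounded down with floor(), and a None side becomes the complement. A few
# hypothetical calls:
def _demo_validate_shuffle_split():
    a = _validate_shuffle_split(10, test_size=0.25, train_size=None)  # (7, 3)
    b = _validate_shuffle_split(10, test_size=3, train_size=None)     # (7, 3)
    c = _validate_shuffle_split(10, test_size=0.25, train_size=0.5)   # (5, 3)
    return a, b, c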
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # divisors of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
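# Illustrative sketch (not part of scikit-learn): StratifiedShuffleSplit above
# first allocates train/test slots to each class by rounding the class
# proportions, then fills any rounding shortfall with randomly chosen leftover
# samples. The allocation step alone, for hypothetical class counts:
def _demo_stratified_shuffle_allocation(cls_count=(6, 4), n_train=5, n_test=3):
    import numpy as np
    cls_count = np.asarray(cls_count)
    p_i = cls_count / float(cls_count.sum())              # class proportions
    n_i = np.round(n_train * p_i).astype(int)             # train slots per class
    t_i = np.minimum(cls_count - n_i,
                     np.round(n_test * p_i).astype(int))  # test slots per class
    # For counts (6, 4) this gives n_i = [3, 2] and t_i = [2, 1], which exactly
    # fills both splits; with less even counts the rounding can leave a
    # shortfall that _iter_indices later tops up at random.
    return n_i, t_i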
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
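# Illustrative sketch (not part of scikit-learn): unlike ShuffleSplit, the
# test_size of LabelShuffleSplit above counts labels rather than samples, so
# every label ends up entirely on one side of each split. A hypothetical usage
# with four labels of two samples each:
def _demo_label_shuffle_split():
    import numpy as np
    labels = np.array([1, 1, 2, 2, 3, 3, 4, 4])
    lss = LabelShuffleSplit(labels, n_iter=2, test_size=0.5, random_state=0)
    splits = list(lss)
    # Each split puts two whole labels (four samples) on each side, and the
    # two samples sharing a label are never separated across train and test.
    return splits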
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
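# Illustrative sketch (not part of scikit-learn): _index_param_value above is
# what lets per-sample fit parameters such as sample_weight follow the training
# indices of each fold, while scalar parameters pass through untouched:
def _demo_index_param_value():
    import numpy as np
    X = np.zeros((6, 2))
    train = np.array([0, 2, 4])
    sliced = _index_param_value(X, np.arange(6) * 10.0, train)  # array([ 0., 20., 40.])
    scalar = _index_param_value(X, 5, train)                    # 5, returned unchanged
    return sliced, scalar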
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
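# Illustrative sketch (not part of scikit-learn): cross_val_predict above
# concatenates the per-fold predictions in fold order and then inverts the
# concatenated test indices to restore the original sample order. The
# reordering step alone, with hypothetical fold outputs:
def _demo_prediction_reordering():
    import numpy as np
    locs = np.array([2, 3, 0, 1])           # test indices, fold by fold
    preds = np.array([20., 30., 0., 10.])   # predictions in the same order
    inv_locs = np.empty(len(locs), dtype=int)
    inv_locs[locs] = np.arange(len(locs))
    # preds[inv_locs] == array([ 0., 10., 20., 30.]): sample i's prediction is
    # back at position i.
    return preds[inv_locs]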
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
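# Illustrative sketch (not part of scikit-learn): _check_is_partition above is
# the guard that makes cross_val_predict reject CV iterators whose test sets do
# not cover every sample exactly once. Hypothetical index arrays:
def _demo_check_is_partition():
    import numpy as np
    ok = _check_is_partition(np.array([2, 0, 1]), 3)       # True: reordering of 0..2
    missing = _check_is_partition(np.array([0, 0, 1]), 3)  # False: sample 2 never tested
    return ok, missing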
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
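# Illustrative sketch (editor's addition, not part of the original module):
# _fit_and_score is the per-fold worker behind cross_val_score and the grid
# search utilities.  The dataset, estimator and interleaved fold indices are
# assumptions chosen for the example.
def _example_fit_and_score_usage():
    import numpy as np
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    clf = svm.SVC(kernel='linear')
    scorer = check_scoring(clf, scoring='accuracy')
    train, test = np.arange(0, 150, 2), np.arange(1, 150, 2)
    ret = _fit_and_score(clf, iris.data, iris.target, scorer, train, test,
                         verbose=0, parameters=None, fit_params=None)
    # With the default flags the returned list is
    # [test_score, n_test_samples, scoring_time].
    test_score, n_test_samples, scoring_time = ret
    return test_score, n_test_samples, scoring_time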
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
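# Illustrative sketch (editor's addition, not part of the original module):
# for a pairwise estimator the columns of X are samples too, so test rows
# must be sliced against the *training* columns.  The random kernel matrix
# and labels below are assumptions chosen for the example.
def _example_safe_split_pairwise():
    import numpy as np
    from sklearn.svm import SVC
    rng = np.random.RandomState(0)
    G = rng.rand(5, 3)
    K = np.dot(G, G.T)               # a valid precomputed kernel matrix
    y = np.array([0, 1, 0, 1, 0])
    est = SVC(kernel='precomputed')  # _pairwise is True for this estimator
    train, test = np.array([0, 1, 2]), np.array([3, 4])
    K_train, y_train = _safe_split(est, K, y, train)
    K_test, y_test = _safe_split(est, K, y, test, train)
    return K_train.shape, K_test.shape   # (3, 3) and (2, 3)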
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
    checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
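# Illustrative sketch (editor's addition, not part of the original module):
# check_cv turns the various accepted ``cv`` inputs into an actual
# cross-validation generator.  The toy arrays below are assumptions chosen
# for the example.
def _example_check_cv_usage():
    import numpy as np
    X_toy = np.zeros((6, 2))
    y_toy = np.array([0, 0, 0, 1, 1, 1])
    # An integer with classifier=True and a binary/multiclass y expands to a
    # StratifiedKFold; every other combination falls back to a plain KFold.
    cv_clf = check_cv(3, X_toy, y_toy, classifier=True)
    cv_reg = check_cv(3, X_toy, y_toy, classifier=False)
    return cv_clf, cv_reg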
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
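# Illustrative usage sketch (editor's addition, not part of the original
# module): permutation_test_score estimates how likely the cross-validated
# score is under the null hypothesis that features and labels are
# independent.  Dataset, estimator and the permutation count are assumptions
# chosen for the example.
def _example_permutation_test_score_usage():
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    clf = svm.SVC(kernel='linear')
    score, permutation_scores, pvalue = permutation_test_score(
        clf, iris.data, iris.target, cv=5, n_permutations=30)
    # A small p-value suggests the score is unlikely to arise by chance.
    return score, permutation_scores, pvalue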
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
marionleborgne/nupic.research | projects/capybara/supervised_baseline/v1_no_sequences/plot_results.py | 9 | 3714 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import os
import pandas as pd
from sklearn.metrics import (classification_report, confusion_matrix,
accuracy_score)
from baseline_utils import predictions_vote
from plot_utils import (plot_confusion_matrix, plot_train_history,
plot_classification_report, plot_predictions)
if __name__ == '__main__':
# Path to CSV files (training history and predictions)
parser = argparse.ArgumentParser()
parser.add_argument('--vote_window', '-v', dest='vote_window',
type=int, default=11)
parser.add_argument('--input_dir', '-i', dest='input_dir',
type=str, default='results')
  parser.add_argument('--output_dir', '-o', dest='output_dir', type=str,
default='plots')
options = parser.parse_args()
vote_window = options.vote_window
input_dir = options.input_dir
output_dir = options.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
train_history_path = os.path.join(input_dir, 'train_history.csv')
predictions_path = os.path.join(input_dir, 'predictions.csv')
# Training history
df = pd.read_csv(train_history_path)
epochs = range(len(df.epoch.values))
acc = df.acc.values
loss = df.loss.values
output_file = os.path.join(output_dir, 'train_history.html')
plot_train_history(epochs, acc, loss, output_file)
print 'Plot saved:', output_file
# Predictions
df = pd.read_csv(predictions_path)
t = df.t.values
X_values = df.scalar_value.values
y_true = df.y_true.values
y_pred = df.y_pred.values
if vote_window > 0:
y_pred = predictions_vote(y_pred, vote_window)
# Accuracy
acc = accuracy_score(y_true, y_pred)
print 'Accuracy on test set:', acc
label_list = sorted(df.y_true.unique())
# Plot normalized confusion matrix
cnf_matrix = confusion_matrix(y_true, y_pred)
output_file = os.path.join(output_dir, 'confusion_matrix.png')
_ = plot_confusion_matrix(cnf_matrix,
output_file,
classes=label_list,
normalize=True,
title='Confusion matrix (accuracy=%.2f)' % acc)
print 'Plot saved:', output_file
# Classification report (F1 score, etc.)
clf_report = classification_report(y_true, y_pred)
output_file = os.path.join(output_dir, 'classification_report.png')
plot_classification_report(clf_report, output_file)
print 'Plot saved:', output_file
# Plot predictions
output_file = os.path.join(output_dir, 'predictions.html')
title = 'Predictions (accuracy=%s)' % acc
plot_predictions(t, X_values, y_true, y_pred, output_file, title)
print 'Plot saved:', output_file
| agpl-3.0 |
tomsilver/nupic | tests/integration/nupic/opf/opf_description_template_test/experiments/gym/base.py | 1 | 15721 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'timestamp': dict(fieldname='timestamp', type='DateEncoder',timeOfDay=(5,5)),
'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
name='attendeeCount', minval=0, maxval=270,
clipInput=True, w=5, resolution=10, forced=True),
'consumption': dict(fieldname='consumption',type='ScalarEncoder',
name='consumption', minval=0,maxval=115,
clipInput=True, w=5, resolution=5, forced=True),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 20,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 8,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': float("nan"),
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
################################################################################
control = dict(
environment = 'opfExperiment',
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "OnlineLearning",
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my gym.csv dataset',
'source': 'file://extra/gym/gym.csv',
'first_record': 0,
'last_record': 4000
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, dict(predictedField="consumption")),
#IterationPhaseSpecInferOnly(10),
],
'metrics' :[
MetricSpec(metric='rmse',
field="consumption",
inferenceElement=InferenceElement.prediction),
],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/models/official/mnist/mnist_eager.py | 5 | 7794 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST model training with TensorFlow eager execution.
See:
https://research.googleblog.com/2017/10/eager-execution-imperative-define-by.html
This program demonstrates training of the convolutional neural network model
defined in mnist.py with eager execution enabled.
If you are not interested in eager execution, you should ignore this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
# pylint: disable=g-bad-import-order
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.mnist import dataset as mnist_dataset
from official.mnist import mnist
from official.utils.flags import core as flags_core
from official.utils.misc import model_helpers
tfe = tf.contrib.eager
def loss(logits, labels):
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
def compute_accuracy(logits, labels):
predictions = tf.argmax(logits, axis=1, output_type=tf.int64)
labels = tf.cast(labels, tf.int64)
batch_size = int(logits.shape[0])
return tf.reduce_sum(
tf.cast(tf.equal(predictions, labels), dtype=tf.float32)) / batch_size
def train(model, optimizer, dataset, step_counter, log_interval=None):
"""Trains model on `dataset` using `optimizer`."""
start = time.time()
for (batch, (images, labels)) in enumerate(dataset):
with tf.contrib.summary.record_summaries_every_n_global_steps(
10, global_step=step_counter):
# Record the operations used to compute the loss given the input,
# so that the gradient of the loss with respect to the variables
# can be computed.
with tf.GradientTape() as tape:
logits = model(images, training=True)
loss_value = loss(logits, labels)
tf.contrib.summary.scalar('loss', loss_value)
tf.contrib.summary.scalar('accuracy', compute_accuracy(logits, labels))
grads = tape.gradient(loss_value, model.variables)
optimizer.apply_gradients(
zip(grads, model.variables), global_step=step_counter)
if log_interval and batch % log_interval == 0:
rate = log_interval / (time.time() - start)
print('Step #%d\tLoss: %.6f (%d steps/sec)' % (batch, loss_value, rate))
start = time.time()
def test(model, dataset):
"""Perform an evaluation of `model` on the examples from `dataset`."""
avg_loss = tfe.metrics.Mean('loss', dtype=tf.float32)
accuracy = tfe.metrics.Accuracy('accuracy', dtype=tf.float32)
for (images, labels) in dataset:
logits = model(images, training=False)
avg_loss(loss(logits, labels))
accuracy(
tf.argmax(logits, axis=1, output_type=tf.int64),
tf.cast(labels, tf.int64))
print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
(avg_loss.result(), 100 * accuracy.result()))
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar('loss', avg_loss.result())
tf.contrib.summary.scalar('accuracy', accuracy.result())
def run_mnist_eager(flags_obj):
"""Run MNIST training and eval loop in eager mode.
Args:
flags_obj: An object containing parsed flag values.
"""
tf.enable_eager_execution()
model_helpers.apply_clean(flags.FLAGS)
# Automatically determine device and data_format
(device, data_format) = ('/gpu:0', 'channels_first')
if flags_obj.no_gpu or not tf.test.is_gpu_available():
(device, data_format) = ('/cpu:0', 'channels_last')
# If data_format is defined in FLAGS, overwrite automatically set value.
if flags_obj.data_format is not None:
data_format = flags_obj.data_format
print('Using device %s, and data format %s.' % (device, data_format))
# Load the datasets
train_ds = mnist_dataset.train(flags_obj.data_dir).shuffle(60000).batch(
flags_obj.batch_size)
test_ds = mnist_dataset.test(flags_obj.data_dir).batch(
flags_obj.batch_size)
# Create the model and optimizer
model = mnist.create_model(data_format)
optimizer = tf.train.MomentumOptimizer(flags_obj.lr, flags_obj.momentum)
# Create file writers for writing TensorBoard summaries.
if flags_obj.output_dir:
# Create directories to which summaries will be written
# tensorboard --logdir=<output_dir>
# can then be used to see the recorded summaries.
train_dir = os.path.join(flags_obj.output_dir, 'train')
test_dir = os.path.join(flags_obj.output_dir, 'eval')
tf.gfile.MakeDirs(flags_obj.output_dir)
else:
train_dir = None
test_dir = None
summary_writer = tf.contrib.summary.create_file_writer(
train_dir, flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
test_dir, flush_millis=10000, name='test')
# Create and restore checkpoint (if one exists on the path)
checkpoint_prefix = os.path.join(flags_obj.model_dir, 'ckpt')
step_counter = tf.train.get_or_create_global_step()
checkpoint = tf.train.Checkpoint(
model=model, optimizer=optimizer, step_counter=step_counter)
# Restore variables on creation if a checkpoint exists.
checkpoint.restore(tf.train.latest_checkpoint(flags_obj.model_dir))
# Train and evaluate for a set number of epochs.
with tf.device(device):
for _ in range(flags_obj.train_epochs):
start = time.time()
with summary_writer.as_default():
train(model, optimizer, train_ds, step_counter,
flags_obj.log_interval)
end = time.time()
print('\nTrain time for epoch #%d (%d total steps): %f' %
(checkpoint.save_counter.numpy() + 1,
step_counter.numpy(),
end - start))
with test_summary_writer.as_default():
test(model, test_ds)
checkpoint.save(checkpoint_prefix)
def define_mnist_eager_flags():
"""Defined flags and defaults for MNIST in eager mode."""
flags_core.define_base_eager()
flags_core.define_image()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_integer(
name='log_interval', short_name='li', default=10,
help=flags_core.help_wrap('batches between logging training status'))
flags.DEFINE_string(
name='output_dir', short_name='od', default=None,
help=flags_core.help_wrap('Directory to write TensorBoard summaries'))
flags.DEFINE_float(name='learning_rate', short_name='lr', default=0.01,
help=flags_core.help_wrap('Learning rate.'))
flags.DEFINE_float(name='momentum', short_name='m', default=0.5,
help=flags_core.help_wrap('SGD momentum.'))
flags.DEFINE_bool(name='no_gpu', short_name='nogpu', default=False,
help=flags_core.help_wrap(
'disables GPU usage even if a GPU is available'))
flags_core.set_defaults(
data_dir='/tmp/tensorflow/mnist/input_data',
model_dir='/tmp/tensorflow/mnist/checkpoints/',
batch_size=100,
train_epochs=10,
)
def main(_):
run_mnist_eager(flags.FLAGS)
if __name__ == '__main__':
define_mnist_eager_flags()
absl_app.run(main=main)
| apache-2.0 |
eadgarchen/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 40 | 9763 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers, self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [
np.mean(
points[assignments == center], axis=0)
for center in xrange(num_centers)
]
covs = [
np.cov(points[assignments == center].T)
for center in xrange(num_centers)
]
scores = []
for r in xrange(num_points):
scores.append(
np.sqrt(
np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])), points[r, :] -
means[assignments[r]])))
return (points, assignments, scores)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments, true_offsets = (
self.make_random_points(clusters, num_points))
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(input_fn=self.input_fn(points=points,
batch_size=num_points), steps=1)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ASethi77/StateOfTheMedia | src/model/tune_generic_hyperparams.py | 1 | 2503 | # adding this to suppress sklearn DeprecationWarnings...
from mpl_toolkits.mplot3d import Axes3D
from model.linear_regression_model import LinearRegressionModel
from model.MLPRegressionModel import MLPRegressionModel
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import time
from sklearn.model_selection import train_test_split
from util.config import Config, RegressionModels
import matplotlib.pyplot as plt
from matplotlib import cm
from model.overall_runner import corpora_to_day_features, \
init_corpora, combine_day_ranges, match_features_to_labels
current_milli_time = lambda: int(round(time.time() * 1000))
if __name__ == '__main__':
PREDICT_DELAY_RANGE = range(1, 15)
DAY_RANGE_RANGE = range(10, 30)
plot_x = []
plot_y = []
plot_z = []
approval_ratings, political_article_corpora = init_corpora()
for delay in PREDICT_DELAY_RANGE:
plot_x.append([])
plot_y.append([])
plot_z.append([])
for day_range in DAY_RANGE_RANGE:
plot_x[delay - 1].append(delay)
plot_y[delay - 1].append(day_range)
Config.POLL_DELAY = delay
Config.DAY_RANGE = day_range
features_by_day = corpora_to_day_features(political_article_corpora)
features_by_range = combine_day_ranges(features_by_day, approval_ratings)
X, Y = match_features_to_labels(features_by_range, approval_ratings)
X_train_and_val, X_test, Y_train_and_val, Y_test = \
train_test_split(X, Y, test_size=Config.TRAINING_PARTITION, random_state=2)
X_train, X_val, Y_train, Y_val = \
train_test_split(X_train_and_val, Y_train_and_val, test_size=0.125, random_state=2)
# setup model and configurations
if Config.REGRESSION_MODEL == RegressionModels.LINEAR_REGRESSION:
model = LinearRegressionModel([X_train, Y_train])
elif Config.REGRESSION_MODEL == RegressionModels.MLP:
model = MLPRegressionModel([X_train, Y_train])
print(model)
model.train()
mse = model.evaluate(X_val, Y_val)
print("MSE is {}".format(mse))
plot_z[delay - 1].append(mse)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(plot_x, plot_y, plot_z, cmap=cm.coolwarm, antialiased=True, rstride=2, cstride=2)
plt.show()
| apache-2.0 |
luo66/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 212 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
mlflow/mlflow | mlflow/tracking/metric_value_conversion_utils.py | 1 | 2248 | import sys
from mlflow.exceptions import MlflowException, INVALID_PARAMETER_VALUE
def _is_module_imported(module_name: str) -> bool:
return module_name in sys.modules
def _try_get_item(x):
try:
return x.item()
except Exception as e:
raise MlflowException(
f"Failed to convert metric value to float: {e}",
error_code=INVALID_PARAMETER_VALUE,
)
def _converter_requires(module_name: str):
"""Wrapper function that checks if specified `module_name`
is already imported before invoking wrapped function."""
def decorator(func):
def wrapper(x):
if not _is_module_imported(module_name):
return x
return func(x)
return wrapper
return decorator
def convert_metric_value_to_float_if_possible(x) -> float:
if x is None or type(x) == float:
return x
converter_fns_to_try = [
convert_metric_value_to_float_if_ndarray,
convert_metric_value_to_float_if_tensorflow_tensor,
convert_metric_value_to_float_if_torch_tensor,
]
for converter_fn in converter_fns_to_try:
possible_float = converter_fn(x)
if type(possible_float) == float:
return possible_float
try:
return float(x)
except ValueError:
return x # let backend handle conversion if possible
@_converter_requires("numpy")
def convert_metric_value_to_float_if_ndarray(x):
import numpy as np
if isinstance(x, np.ndarray):
return float(_try_get_item(x))
return x
@_converter_requires("torch")
def convert_metric_value_to_float_if_torch_tensor(x):
import torch
if isinstance(x, torch.Tensor):
extracted_tensor_val = x.detach().cpu()
return float(_try_get_item(extracted_tensor_val))
return x
@_converter_requires("tensorflow")
def convert_metric_value_to_float_if_tensorflow_tensor(x):
import tensorflow as tf
if isinstance(x, tf.Tensor):
try:
return float(x)
except Exception as e:
raise MlflowException(
f"Failed to convert metric value to float: {repr(e)}",
error_code=INVALID_PARAMETER_VALUE,
)
return x
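# Illustrative usage sketch (editor's addition, not part of the original
# module): the public helper accepts plain numbers as well as single-element
# numpy / torch / tensorflow values and normalizes them to ``float`` when the
# corresponding library is already imported; anything it cannot convert is
# passed through unchanged.  The inputs below are assumptions chosen for the
# example.
def _example_convert_metric_value_usage():
    import numpy as np  # importing numpy activates the ndarray converter
    as_float = convert_metric_value_to_float_if_possible(np.array(0.5))
    unchanged = convert_metric_value_to_float_if_possible("not-a-number")
    return as_float, unchanged  # -> (0.5, "not-a-number")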
| apache-2.0 |
lilleswing/deepchem | examples/factors/FACTORS_tf_singletask.py | 6 | 3341 | """
Script that trains Tensorflow Singletask models on FACTORS dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import tempfile
import shutil
import deepchem as dc
from FACTORS_datasets import load_factors
###Load data###
shard_size = 2000
num_trials = 2
print("About to load FACTORS data.")
FACTORS_tasks, datasets, transformers = load_factors(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
###Create model###
n_layers = 3
nb_epoch = 125
n_features = train_dataset.get_data_shape()[0]
def task_model_builder(m_dir):
return dc.models.TensorflowMultitaskRegressor(
n_tasks=1,
n_features=n_features,
logdir=m_dir,
layer_sizes=[1000] * n_layers,
dropouts=[.25] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
learning_rate=.0003,
penalty=.0001,
penalty_type="l2",
optimizer="adam",
batch_size=100)
all_results = []
for trial in range(num_trials):
print("Starting trial %d" % trial)
model = dc.models.SingletaskToMultitask(FACTORS_tasks, task_model_builder)
print("Fitting Model")
model.fit(train_dataset, nb_epoch=nb_epoch)
print("Evaluating models")
train_score, train_task_scores = model.evaluate(
train_dataset, [metric], transformers, per_task_metrics=True)
valid_score, valid_task_scores = model.evaluate(
valid_dataset, [metric], transformers, per_task_metrics=True)
test_score, test_task_scores = model.evaluate(
test_dataset, [metric], transformers, per_task_metrics=True)
all_results.append((train_score, train_task_scores, valid_score,
valid_task_scores, test_score, test_task_scores))
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
print("####################################################################")
for trial in range(num_trials):
(train_score, train_task_scores, valid_score, valid_task_scores, test_score,
test_task_scores) = all_results[trial]
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
| mit |
LohithBlaze/scikit-learn | examples/feature_selection/plot_feature_selection.py | 248 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
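# --- Illustrative addition, not part of the original example ---
# After plotting, the indices of the features kept by the univariate selector
# can be inspected directly, showing which of the 24 features (4 informative
# iris features plus 20 noisy ones) survived the 10% percentile cut.
print("Selected feature indices: %s" % selector.get_support(indices=True))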
| bsd-3-clause |
fx2003/tensorflow-study | TensorFlow实战/models/inception/inception/data/build_imagenet_data.py | 12 | 26205 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
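# Added illustrative note (not part of the original script): with the flags
# defined above, a typical invocation might look like
#   python build_imagenet_data.py --train_directory=/data/train \
#     --validation_directory=/data/val --output_directory=/data/tfrecords \
#     --labels_file=imagenet_lsvrc_2015_synsets.txt
# using only flags that this file defines; paths are placeholders.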
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
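def _example_parse_spec():
  """Illustrative sketch added for clarity; not part of the original script.
  A minimal feature spec that could be passed to tf.parse_single_example to
  read back a subset of the fields written by _convert_to_example above.
  """
  return {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/height': tf.FixedLenFeature([], dtype=tf.int64),
      'image/width': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string),
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
  }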
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
  with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch to run; index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches, each covering [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
    list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
| mit |
sgenoud/scikit-learn | examples/svm/plot_custom_kernel.py | 3 | 1522 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
(2 0)
k(x, y) = x ( ) y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.title('3-Class classification using Support Vector Machine with custom'
' kernel')
pl.axis('tight')
pl.show()
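# --- Illustrative check, not part of the original example ---
# The custom kernel k(x, y) = x M y.T with M = diag(2, 1) is just the linear
# kernel computed after rescaling the first feature by sqrt(2), so the two
# Gram matrices should coincide.
X_scaled = X * np.sqrt([2.0, 1.0])
assert np.allclose(my_kernel(X, X), np.dot(X_scaled, X_scaled.T))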
| bsd-3-clause |
glouppe/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 25 | 8544 | import numpy as np
from scipy import linalg
from sklearn.decomposition import (NMF, ProjectedGradientNMF,
non_negative_factorization)
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
random_state = np.random.mtrand.RandomState(0)
def test_initialize_nn_output():
# Test that initialization does not return negative values
data = np.abs(random_state.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
@ignore_warnings
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid sparseness parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(sparseness=name).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('pg', 'cd'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
@ignore_warnings
def test_nmf_fit_close():
# Test that the fit is not too far away
for solver in ('pg', 'cd'):
pnmf = NMF(5, solver=solver, init='nndsvd', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
# Test that NMF.transform returns close values
A = np.abs(random_state.randn(6, 5))
for solver in ('pg', 'cd'):
m = NMF(solver=solver, n_components=4, init='nndsvd', random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
@ignore_warnings
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
@ignore_warnings
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
tol = 1e-2
A = np.abs(random_state.randn(10, 10))
m = ProjectedGradientNMF(n_components=5, random_state=0, tol=tol).fit(A)
data_sp = ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0,
tol=tol).fit(A).data_sparseness_
comp_sp = ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0,
tol=tol).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('pg', 'cd'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
@ignore_warnings
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
A = np.abs(random_state.randn(3, 2))
A[A > 1.0] = 0
A = csc_matrix(A)
for solver in ('pg', 'cd'):
model = NMF(solver=solver, random_state=0, tol=1e-4, n_components=2)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('pg', 'cd'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
@ignore_warnings
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
msg = "Number of components must be positive; got (n_components='2')"
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
def test_safe_compute_error():
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0)
error = nmf._safe_compute_error(A, W, H)
error_sparse = nmf._safe_compute_error(A_sparse, W, H)
assert_almost_equal(error, error_sparse)
| bsd-3-clause |
elkingtonmcb/h2o-2 | py/testdir_single_jvm/test_rf_histo_fail_fvec.py | 9 | 1394 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_import as h2i
paramDict = {
'destination_key': 'model_keyA',
'ntrees': 13,
'response': 'C55',
'mtries': 3,
'source': u'covtype.hex',
'seed': '1231231',
'importance': 0,
'balance_classes': 0,
}
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_histo_fail_fvec(self):
csvPathname = 'standard/covtype.data'
for trial in range(3):
kwargs = paramDict.copy()
timeoutSecs = 180
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put')
rfSeed = random.randint(0, sys.maxint)
kwargs.update({'seed': rfSeed})
h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, retryDelaySecs=1, **kwargs)
elapsed = time.time()-start
print "Trial #", trial, "completed in", elapsed, "seconds.", "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
MohammedWasim/scikit-learn | sklearn/metrics/cluster/supervised.py | 206 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
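# Added explanatory comment: comb2(n) is the number of unordered pairs that
# can be drawn from n items, e.g. comb2(4) == 6; it is used below to count
# co-clustered pairs when computing the adjusted Rand index.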
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
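# Added illustrative example (comment only, not executed):
#   contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
# returns
#   array([[1, 1],
#          [0, 2]])
# i.e. one sample of true class 0 falls into each predicted cluster, while
# both samples of true class 1 fall into predicted cluster 1.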
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
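# Added illustrative example (comment only): for two identical labelings
# such as [0, 0, 1, 1] vs. [0, 0, 1, 1], the contingency matrix is
# [[2, 0], [0, 2]] and the mutual information equals log(2) ~= 0.693,
# i.e. the entropy of the (shared) labeling.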
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper-bounded by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
       an expected AMI around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
    # Calculate the entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
lilleswing/deepchem | contrib/torch/torch_multitask_classification.py | 8 | 4402 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 22:31:24 2017
@author: Zhenqin Wu
"""
import torch
import numpy as np
from deepchem.metrics import from_one_hot
from torch_model import TorchMultitaskModel
class TorchMultitaskClassification(TorchMultitaskModel):
def __init__(self, n_tasks, n_features, n_classes=2, **kwargs):
"""Constructs the computational graph.
    This function constructs the computational graph for the model. It relies
    on subclassed methods (build/cost) to construct specific graphs.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of features.
n_classes: int
Number of classes if this is for classification.
"""
# Save hyperparameters
self.n_tasks = n_tasks
self.n_features = n_features
self.n_classes = n_classes
super(TorchMultitaskClassification, self).__init__(**kwargs)
def build(self):
"""Constructs the graph architecture as specified in its config.
This method creates the following Placeholders:
mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
batch_size x n_features.
"""
layer_sizes = self.layer_sizes
weight_init_stddevs = self.weight_init_stddevs
bias_init_consts = self.bias_init_consts
dropouts = self.dropouts
lengths_set = {
len(layer_sizes),
len(weight_init_stddevs),
len(bias_init_consts),
len(dropouts),
}
assert len(lengths_set) == 1, 'All layer params must have same length.'
n_layers = lengths_set.pop()
assert n_layers > 0, 'Must have some layers defined.'
prev_layer_size = self.n_features
self.W_list = []
self.b_list = []
for i in range(n_layers):
W_init = np.random.normal(0, weight_init_stddevs[i],
(prev_layer_size, layer_sizes[i]))
W_init = torch.cuda.FloatTensor(W_init)
self.W_list.append(torch.autograd.Variable(W_init, requires_grad=True))
b_init = np.full((layer_sizes[i],), bias_init_consts[i])
b_init = torch.cuda.FloatTensor(b_init)
self.b_list.append(torch.autograd.Variable(b_init, requires_grad=True))
prev_layer_size = layer_sizes[i]
self.task_W_list = []
self.task_b_list = []
for i in range(self.n_tasks):
W_init = np.random.normal(0, weight_init_stddevs[-1],
(prev_layer_size, self.n_classes))
W_init = torch.cuda.FloatTensor(W_init)
self.task_W_list.append(
torch.autograd.Variable(W_init, requires_grad=True))
b_init = np.full((self.n_classes,), bias_init_consts[-1])
b_init = torch.cuda.FloatTensor(b_init)
self.task_b_list.append(
torch.autograd.Variable(b_init, requires_grad=True))
self.trainables = self.W_list + self.b_list + self.task_W_list + self.task_b_list
self.regularizaed_variables = self.W_list + self.task_W_list
def forward(self, X, training=False):
for i, W in enumerate(self.W_list):
X = X.mm(W)
X += self.b_list[i].unsqueeze(0).expand_as(X)
X = torch.nn.ReLU()(X)
if training:
X = torch.nn.Dropout(p=self.dropouts[i])(X)
outputs = []
for i, W in enumerate(self.task_W_list):
output = X.mm(W)
output += self.task_b_list[i].unsqueeze(0).expand_as(output)
if not training:
output = torch.nn.functional.softmax(output)
outputs.append(output)
return outputs
def cost(self, logit, label, weight):
loss = []
for i in range(logit.size()[0]):
loss.append(
torch.nn.functional.cross_entropy(logit[i, :], label[i].long()).mul(
weight[i]))
loss = torch.cat(loss).mean()
return loss
def predict_on_batch(self, X_batch):
X_batch = torch.autograd.Variable(torch.cuda.FloatTensor(X_batch))
outputs = self.forward(X_batch, training=False)
y_pred_batch = torch.stack(outputs, 1).data.cpu().numpy()[:]
y_pred_batch = from_one_hot(y_pred_batch, 2)
return y_pred_batch
def predict_proba_on_batch(self, X_batch):
X_batch = torch.autograd.Variable(torch.cuda.FloatTensor(X_batch))
outputs = self.forward(X_batch, training=False)
y_pred_batch = torch.stack(outputs, 1).data.cpu().numpy()[:]
return y_pred_batch
| mit |
luo66/scikit-learn | sklearn/metrics/cluster/supervised.py | 206 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
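# Illustrative sketch, not part of scikit-learn: the helper name
# ``_demo_contingency_matrix`` below is hypothetical. It only shows that, for
# two identical 2-cluster labelings of four samples, the contingency matrix
# described in the docstring above is diagonal.
def _demo_contingency_matrix():
    # hypothetical demo helper; never called by the library itself
    C = contingency_matrix([0, 0, 1, 1], [0, 0, 1, 1])
    assert (C == np.array([[2, 0], [0, 2]])).all()
    return C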
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even when the labels
    are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
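# Illustrative sketch, not part of scikit-learn (``_demo_ari_symmetry`` is a
# hypothetical helper): the ARI is symmetric in its two arguments, as stated
# in the docstring above.
def _demo_ari_symmetry():
    # hypothetical demo helper; never called by the library itself
    a = adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])
    b = adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])
    assert abs(a - b) < 1e-12
    return a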
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
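# Illustrative sketch, not part of scikit-learn (``_demo_hcv`` is a
# hypothetical helper): splitting a class into extra clusters keeps every
# cluster pure, so homogeneity stays at 1.0 while completeness drops below it
# and the V-measure, their harmonic mean, lands in between.
def _demo_hcv():
    # hypothetical demo helper; never called by the library itself
    h, c, v = homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 2])
    assert h > 0.999 and c < v < h
    return h, c, v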
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
    assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
    Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
    Clusters that include samples from totally different classes totally
    destroy the homogeneity of the labeling, hence the V-measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
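# Illustrative sketch, not part of scikit-learn (``_demo_mutual_info`` is a
# hypothetical helper): for two identical balanced binary labelings the
# mutual information equals the entropy of either labeling, log(2) nats,
# which is what the formula in the docstring above reduces to.
def _demo_mutual_info():
    # hypothetical demo helper; never called by the library itself
    mi = mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
    assert abs(mi - log(2)) < 1e-9
    return mi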
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper-bounded by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
    # Calculate the entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
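# Illustrative sketch, not part of scikit-learn (``_demo_entropy`` is a
# hypothetical helper): a balanced binary labeling has entropy log(2) nats,
# and the log(pi) - log(pi_sum) formulation above agrees with the naive
# log(pi / pi_sum) expression.
def _demo_entropy():
    # hypothetical demo helper; never called by the library itself
    h = entropy([0, 0, 1, 1])
    assert abs(h - log(2)) < 1e-9
    return h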
| bsd-3-clause |
sgenoud/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 1 | 2616 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD Style.
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import numpy as np
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
"""Tests the FastMCD algorithm implementation
"""
### Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
### Medium data set
launch_mcd_on_dataset(1000, 5, 450, 1e-3, 1e-3, 540)
### Large data set
launch_mcd_on_dataset(1700, 5, 800, 1e-3, 1e-3, 870)
### 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(
n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
clf.fit(X)
y_pred = clf.predict(X)
assert_array_almost_equal(
clf.decision_function(X, raw_mahalanobis=True),
clf.mahalanobis(X - clf.location_))
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
| bsd-3-clause |
Diyago/Machine-Learning-scripts | time series regression/autocorelation, mov avg etc/doubleExponentialSmoothing.py | 1 | 2945 | # Load modules
from __future__ import print_function
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
# Read dataset into a pandas.DataFrame
beer_df = pd.read_csv(
"datasets/quarterly-beer-production-in-aus-March 1956-June 1994.csv"
)
# Display shape of the dataset
print("Shape of the dataframe:", beer_df.shape)
beer_df.head()
# Rename the 2nd column
beer_df.rename(
columns={
"Quarterly beer production in Australia: megalitres. March 1956 ? June 1994": "Beer_Prod"
},
inplace=True,
)
# Remove missing values
missing = (pd.isnull(beer_df["Quarter"])) | (pd.isnull(beer_df["Beer_Prod"]))
print("Number of rows with at least one missing values:", missing.sum())
beer_df = beer_df.loc[~missing, :]
print("Shape after removing missing values:", beer_df.shape)
# Function for double exponential smoothing (Holt's linear trend method)
def double_exp_smoothing(x, alpha, beta):
    """Holt's double exponential smoothing with level F and trend T."""
    yhat = [x[0]]  # first forecast is the first observed value
    for t in range(1, len(x)):
        if t == 1:
            # initialise the level with the first observation and the trend
            # with the first observed difference
            F, T = x[0], x[1] - x[0]
        # update the level, keeping the previous level for the trend update
        F_n_1, F = F, alpha * x[t] + (1 - alpha) * (F + T)
        # update the trend from the change in level
        T = beta * (F - F_n_1) + (1 - beta) * T
        yhat.append(F + T)  # one-step-ahead forecast: level + trend
    return yhat
beer_df["DEF"] = double_exp_smoothing(beer_df["Beer_Prod"], 0.4, 0.7)
### Plot Double Exponential Smoothing forecasted values
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(2, 1, 1)
beer_df["Beer_Prod"].plot(ax=ax)
ax.set_title("Beer Production")
ax = fig.add_subplot(2, 1, 2)
beer_df["DEF"].plot(ax=ax, color="r")
ax.set_title("Double Smoothing Forecast")
plt.savefig("plots/ch2/B07887_03_14.png", format="png", dpi=300)
# Single vs Double forecast values
# Function for single exponential smoothing
def single_exp_smoothing(x, alpha):
F = [x[0]] # first value is same as series
for t in range(1, len(x)):
F.append(alpha * x[t] + (1 - alpha) * F[t - 1])
return F
beer_df["Single_Exponential_Forecast"] = single_exp_smoothing(beer_df["Beer_Prod"], 0.4)
### Plot actual vs forecasted values for single and double smoothing
f, axarr = plt.subplots(2, sharex=True)
f.set_size_inches(5.5, 5.5)
beer_df["Beer_Prod"].iloc[:153].plot(color="b", linestyle="-", ax=axarr[0])
beer_df["DEF"].iloc[:153].plot(color="r", linestyle="--", ax=axarr[0])
axarr[0].set_title("Actual Vs Double Smoothing Forecasting")
beer_df["Beer_Prod"].iloc[:153].plot(color="b", linestyle="-", ax=axarr[1])
beer_df["Single_Exponential_Forecast"].iloc[:153].plot(
color="r", linestyle="--", ax=axarr[1]
)
axarr[1].set_title("Actual Vs Single Smoothing Forecasting")
# Plot single and double exponential smoothing
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(2, 1, 1)
beer_df["Single_Exponential_Forecast"].plot(ax=ax)
ax.set_title("Single Exponential Smoothing")
ax = fig.add_subplot(2, 1, 2)
beer_df["DEF"].plot(ax=ax, color="r")
ax.set_title("Double Smoothing Forecast")
plt.savefig("plots/ch2/B07887_03_14.png", format="png", dpi=300)
| apache-2.0 |
LohithBlaze/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 296 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 35 | 16763 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
| bsd-3-clause |
manipopopo/tensorflow | tensorflow/contrib/learn/python/learn/estimators/logistic_regressor_test.py | 44 | 4901 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LogisticRegressor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import logistic_regressor
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
def _iris_data_input_fn():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
features = constant_op.constant(iris.data[ids], dtype=dtypes.float32)
labels = constant_op.constant(iris.target[ids], dtype=dtypes.float32)
labels = array_ops.reshape(labels, labels.get_shape().concatenate(1))
return features, labels
def _logistic_regression_model_fn(features, labels, mode):
_ = mode
logits = layers.linear(
features,
1,
weights_initializer=init_ops.zeros_initializer(),
# Intentionally uses really awful initial values so that
# AUC/precision/recall/etc will change meaningfully even on a toy dataset.
biases_initializer=init_ops.constant_initializer(-10.0))
predictions = math_ops.sigmoid(logits)
loss = losses.sigmoid_cross_entropy(labels, logits)
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return predictions, loss, train_op
class LogisticRegressorTest(test.TestCase):
def test_fit_and_evaluate_metrics(self):
"""Tests basic fit and evaluate, and checks the evaluation metrics."""
regressor = logistic_regressor.LogisticRegressor(
model_fn=_logistic_regression_model_fn)
# Get some (intentionally horrible) baseline metrics.
regressor.fit(input_fn=_iris_data_input_fn, steps=1)
eval_metrics = regressor.evaluate(input_fn=_iris_data_input_fn, steps=1)
self.assertNear(
0.0, eval_metrics[metric_key.MetricKey.PREDICTION_MEAN], err=1e-3)
self.assertNear(
0.5, eval_metrics[metric_key.MetricKey.LABEL_MEAN], err=1e-6)
self.assertNear(
0.5, eval_metrics[metric_key.MetricKey.ACCURACY_BASELINE], err=1e-6)
self.assertNear(0.5, eval_metrics[metric_key.MetricKey.AUC], err=1e-6)
self.assertNear(
0.5, eval_metrics[metric_key.MetricKey.ACCURACY_MEAN % 0.5], err=1e-6)
self.assertNear(
0.0, eval_metrics[metric_key.MetricKey.PRECISION_MEAN % 0.5], err=1e-6)
self.assertNear(
0.0, eval_metrics[metric_key.MetricKey.RECALL_MEAN % 0.5], err=1e-6)
# Train for more steps and check the metrics again.
regressor.fit(input_fn=_iris_data_input_fn, steps=100)
eval_metrics = regressor.evaluate(input_fn=_iris_data_input_fn, steps=1)
# Mean prediction moves from ~0.0 to ~0.5 as we stop predicting all 0's.
self.assertNear(
0.5, eval_metrics[metric_key.MetricKey.PREDICTION_MEAN], err=1e-2)
# Label mean and baseline both remain the same at 0.5.
self.assertNear(
0.5, eval_metrics[metric_key.MetricKey.LABEL_MEAN], err=1e-6)
self.assertNear(
0.5, eval_metrics[metric_key.MetricKey.ACCURACY_BASELINE], err=1e-6)
# AUC improves from 0.5 to 1.0.
self.assertNear(1.0, eval_metrics[metric_key.MetricKey.AUC], err=1e-6)
# Accuracy improves from 0.5 to >0.9.
self.assertTrue(
eval_metrics[metric_key.MetricKey.ACCURACY_MEAN % 0.5] > 0.9)
# Precision improves from 0.0 to 1.0.
self.assertNear(
1.0, eval_metrics[metric_key.MetricKey.PRECISION_MEAN % 0.5], err=1e-6)
# Recall improves from 0.0 to >0.9.
self.assertTrue(eval_metrics[metric_key.MetricKey.RECALL_MEAN % 0.5] > 0.9)
if __name__ == '__main__':
test.main()
| apache-2.0 |
codeworldprodigy/lab2 | lib/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
    By default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
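# Illustrative sketch (not part of the original module): a minimal visitor
# following the ``visit_<NodeName>`` convention described above.  It collects
# the identifiers of all ``Name`` nodes in a parsed template; ``Name`` and its
# ``name`` attribute are standard jinja2 AST nodes.
class _ExampleNameCollector(NodeVisitor):
    def __init__(self):
        self.found = []
    def visit_Name(self, node):
        # record the identifier, then keep walking the children
        self.found.append(node.name)
        self.generic_visit(node)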
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None`, the node will be removed
    from the previous location; otherwise it's replaced with the return
    value. The return value may be the original node, in which case no
    replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
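# Illustrative sketch (not part of the original module): returning ``None``
# from a visitor method of a ``NodeTransformer`` removes the visited node, as
# the class docstring explains.  ``Output`` is a standard jinja2 node type;
# the transformer below simply drops every output node it encounters.
class _ExampleOutputRemover(NodeTransformer):
    def visit_Output(self, node):
        # returning None removes the node from its parent
        return None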
| apache-2.0 |
manipopopo/tensorflow | tensorflow/contrib/learn/python/learn/__init__.py | 40 | 2715 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning with TensorFlow (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import basic_session_run_hooks
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import learn_io as io
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.contrib.learn.python.learn import preprocessing
from tensorflow.contrib.learn.python.learn import utils
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn.evaluable import Evaluable
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.contrib.learn.python.learn.export_strategy import ExportStrategy
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import run_feeds
from tensorflow.contrib.learn.python.learn.graph_actions import run_n
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.learn.python.learn.monitors import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.trainable import Trainable
from tensorflow.contrib.learn.python.learn.utils import *
# pylint: enable=wildcard-import
| apache-2.0 |
mileistone/test | vedanet/data/transform/_preprocess.py | 1 | 21012 | #
# Image and annotations preprocessing for lightnet networks
# The image transformations work with both Pillow and OpenCV images
# The annotation transformations work with brambox.annotations.Annotation objects
# Copyright EAVISE
#
# modified by mileistone
import random
import collections
import logging as log
import torch
import numpy as np
from PIL import Image, ImageOps
import brambox.boxes as bbb
from .util import BaseTransform, BaseMultiTransform
try:
import cv2
except ImportError:
log.warn('OpenCV is not installed and cannot be used')
cv2 = None
__all__ = ['Letterbox', 'RandomCrop', 'RandomCropLetterbox', 'RandomFlip', 'HSVShift', 'BramboxToTensor']
class Letterbox(BaseMultiTransform):
""" Transform images and annotations to the right network dimensions.
Args:
dimension (tuple, optional): Default size for the letterboxing, expressed as a (width, height) tuple; Default **None**
dataset (lightnet.data.Dataset, optional): Dataset that uses this transform; Default **None**
Note:
Create 1 Letterbox object and use it for both image and annotation transforms.
This object will save data from the image transform and use that on the annotation transform.
"""
def __init__(self, dimension=None, dataset=None):
super().__init__(dimension=dimension, dataset=dataset)
if self.dimension is None and self.dataset is None:
raise ValueError('This transform either requires a dimension or a dataset to infer the dimension')
self.pad = None
self.scale = None
self.fill_color = 127
def __call__(self, data):
if data is None:
return None
elif isinstance(data, collections.Sequence):
return self._tf_anno(data)
elif isinstance(data, Image.Image):
return self._tf_pil(data)
elif isinstance(data, np.ndarray):
return self._tf_cv(data)
else:
log.error(f'Letterbox only works with <brambox annotation lists>, <PIL images> or <OpenCV images> [{type(data)}]')
return data
def _tf_pil(self, img):
""" Letterbox an image to fit in the network """
if self.dataset is not None:
net_w, net_h = self.dataset.input_dim
else:
net_w, net_h = self.dimension
im_w, im_h = img.size
if im_w == net_w and im_h == net_h:
self.scale = None
self.pad = None
return img
# Rescaling
if im_w / net_w >= im_h / net_h:
self.scale = net_w / im_w
else:
self.scale = net_h / im_h
if self.scale != 1:
resample_mode = Image.NEAREST #Image.BILINEAR if self.scale > 1 else Image.ANTIALIAS
img = img.resize((int(self.scale*im_w), int(self.scale*im_h)), resample_mode)
im_w, im_h = img.size
if im_w == net_w and im_h == net_h:
self.pad = None
return img
# Padding
img_np = np.array(img)
channels = img_np.shape[2] if len(img_np.shape) > 2 else 1
pad_w = (net_w - im_w) / 2
pad_h = (net_h - im_h) / 2
self.pad = (int(pad_w), int(pad_h), int(pad_w+.5), int(pad_h+.5))
img = ImageOps.expand(img, border=self.pad, fill=(self.fill_color,)*channels)
return img
def _tf_cv(self, img):
""" Letterbox and image to fit in the network """
if self.dataset is not None:
net_w, net_h = self.dataset.input_dim
else:
net_w, net_h = self.dimension
im_h, im_w = img.shape[:2]
if im_w == net_w and im_h == net_h:
self.scale = None
self.pad = None
return img
# Rescaling
if im_w / net_w >= im_h / net_h:
self.scale = net_w / im_w
else:
self.scale = net_h / im_h
if self.scale != 1:
img = cv2.resize(img, None, fx=self.scale, fy=self.scale, interpolation=cv2.INTER_CUBIC)
im_h, im_w = img.shape[:2]
if im_w == net_w and im_h == net_h:
self.pad = None
return img
# Padding
channels = img.shape[2] if len(img.shape) > 2 else 1
pad_w = (net_w - im_w) / 2
pad_h = (net_h - im_h) / 2
self.pad = (int(pad_w), int(pad_h), int(pad_w+.5), int(pad_h+.5))
img = cv2.copyMakeBorder(img, self.pad[1], self.pad[3], self.pad[0], self.pad[2], cv2.BORDER_CONSTANT, value=(self.fill_color,)*channels)
return img
def _tf_anno(self, annos):
""" Change coordinates of an annotation, according to the previous letterboxing """
for anno in annos:
if self.scale is not None:
anno.x_top_left *= self.scale
anno.y_top_left *= self.scale
anno.width *= self.scale
anno.height *= self.scale
if self.pad is not None:
anno.x_top_left += self.pad[0]
anno.y_top_left += self.pad[1]
return annos
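# Illustrative sketch (not part of the original module): the shared-state
# pattern described in the Letterbox docstring.  The image is transformed
# first so that the stored scale and padding can then be applied to the
# annotation list of the same sample; ``pil_image`` and ``annotations`` are
# hypothetical inputs.
def _example_letterbox_usage(pil_image, annotations):
    letterbox = Letterbox(dimension=(416, 416))
    boxed_image = letterbox(pil_image)    # computes and stores scale/pad
    boxed_annos = letterbox(annotations)  # reuses the stored scale/pad
    return boxed_image, boxed_annos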
class RandomCrop(BaseMultiTransform):
""" Take random crop from the image.
Args:
jitter (Number [0-1]): Indicates how much of the image we can crop
crop_anno(Boolean, optional): Whether we crop the annotations inside the image crop; Default **False**
intersection_threshold(number or list, optional): Argument passed on to :class:`brambox.boxes.util.modifiers.CropModifier`
Note:
Create 1 RandomCrop object and use it for both image and annotation transforms.
This object will save data from the image transform and use that on the annotation transform.
"""
def __init__(self, jitter, crop_anno=False, intersection_threshold=0.001, fill_color=127):
super().__init__(jitter=jitter, crop_anno=crop_anno, fill_color=fill_color)
self.crop_modifier = bbb.CropModifier(float('Inf'), intersection_threshold)
def __call__(self, data):
if data is None:
return None
elif isinstance(data, collections.Sequence):
return self._tf_anno(data)
elif isinstance(data, Image.Image):
return self._tf_pil(data)
elif isinstance(data, np.ndarray):
return self._tf_cv(data)
else:
log.error(f'RandomCrop only works with <brambox annotation lists>, <PIL images> or <OpenCV images> [{type(data)}]')
return data
def _tf_pil(self, img):
""" Take random crop from image """
im_w, im_h = img.size
crop = self._get_crop(im_w, im_h)
crop_w = crop[2] - crop[0]
crop_h = crop[3] - crop[1]
img_np = np.array(img)
channels = img_np.shape[2] if len(img_np.shape) > 2 else 1
img = img.crop((max(0, crop[0]), max(0, crop[1]), min(im_w, crop[2]-1), min(im_h, crop[3]-1)))
img_crop = Image.new(img.mode, (crop_w, crop_h), color=(self.fill_color,)*channels)
img_crop.paste(img, (max(0, -crop[0]), max(0, -crop[1])))
return img_crop
def _tf_cv(self, img):
""" Take random crop from image """
im_h, im_w = img.shape[:2]
crop = self._get_crop(im_w, im_h)
crop_w = crop[2] - crop[0]
crop_h = crop[3] - crop[1]
img_crop = np.ones((crop_h, crop_w) + img.shape[2:], dtype=img.dtype) * self.fill_color
src_x1 = max(0, crop[0])
src_x2 = min(crop[2], im_w)
src_y1 = max(0, crop[1])
src_y2 = min(crop[3], im_h)
dst_x1 = max(0, -crop[0])
dst_x2 = crop_w - max(0, crop[2]-im_w)
dst_y1 = max(0, -crop[1])
dst_y2 = crop_h - max(0, crop[3]-im_h)
img_crop[dst_y1:dst_y2, dst_x1:dst_x2] = img[src_y1:src_y2, src_x1:src_x2]
return img_crop
def _get_crop(self, im_w, im_h):
dw, dh = int(im_w*self.jitter), int(im_h*self.jitter)
crop_left = random.randint(-dw, dw)
crop_right = random.randint(-dw, dw)
crop_top = random.randint(-dh, dh)
crop_bottom = random.randint(-dh, dh)
crop = (crop_left, crop_top, im_w-crop_right, im_h-crop_bottom)
self.crop_modifier.area = crop
return crop
def _tf_anno(self, annos):
""" Change coordinates of an annotation, according to the previous crop """
if self.crop_anno:
bbb.modify(annos, [self.crop_modifier])
else:
crop = self.crop_modifier.area
for i in range(len(annos)-1, -1, -1):
anno = annos[i]
x1 = max(crop[0], anno.x_top_left)
x2 = min(crop[2], anno.x_top_left+anno.width)
y1 = max(crop[1], anno.y_top_left)
y2 = min(crop[3], anno.y_top_left+anno.height)
w = x2-x1
h = y2-y1
if self.crop_modifier.inter_area:
ratio = ((w * h) / (anno.width * anno.height)) < self.crop_modifier.inter_thresh
else:
ratio = (w / anno.width) < self.crop_modifier.inter_thresh[0] or (h / anno.height) < self.crop_modifier.inter_thresh[1]
if w <= 0 or h <= 0 or ratio:
del annos[i]
continue
annos[i].x_top_left -= crop[0]
annos[i].y_top_left -= crop[1]
return annos
class RandomCropLetterbox(BaseMultiTransform):
""" Take random crop from the image.
Args:
jitter (Number [0-1]): Indicates how much of the image we can crop
crop_anno(Boolean, optional): Whether we crop the annotations inside the image crop; Default **False**
intersection_threshold(number or list, optional): Argument passed on to :class:`brambox.boxes.util.modifiers.CropModifier`
Note:
Create 1 RandomCrop object and use it for both image and annotation transforms.
This object will save data from the image transform and use that on the annotation transform.
"""
def __init__(self, dataset, jitter, fill_color=127):
super().__init__(dataset=dataset, jitter=jitter, fill_color=fill_color)
self.crop_info = None
self.output_w = None
self.output_h = None
def __call__(self, data):
if data is None:
return None
elif isinstance(data, collections.Sequence):
return self._tf_anno(data)
elif isinstance(data, Image.Image):
return self._tf_pil(data)
else:
log.error(f'RandomCrop only works with <brambox annotation lists>, <PIL images> or <OpenCV images> [{type(data)}]')
return data
def _tf_pil(self, img):
""" Take random crop from image """
self.output_w, self.output_h = self.dataset.input_dim
#print('output shape: %d, %d' % (self.output_w, self.output_h))
orig_w, orig_h = img.size
img_np = np.array(img)
channels = img_np.shape[2] if len(img_np.shape) > 2 else 1
dw = int(self.jitter * orig_w)
dh = int(self.jitter * orig_h)
new_ar = float(orig_w + random.randint(-dw, dw)) / (orig_h + random.randint(-dh, dh))
scale = random.random()*(2-0.25) + 0.25
if new_ar < 1:
nh = int(scale * orig_h)
nw = int(nh * new_ar)
else:
nw = int(scale * orig_w)
nh = int(nw / new_ar)
if self.output_w > nw:
dx = random.randint(0, self.output_w - nw)
else:
dx = random.randint(self.output_w - nw, 0)
if self.output_h > nh:
dy = random.randint(0, self.output_h - nh)
else:
dy = random.randint(self.output_h - nh, 0)
nxmin = max(0, -dx)
nymin = max(0, -dy)
nxmax = min(nw, -dx + self.output_w - 1)
nymax = min(nh, -dy + self.output_h - 1)
sx, sy = float(orig_w)/nw, float(orig_h)/nh
orig_xmin = int(nxmin * sx)
orig_ymin = int(nymin * sy)
orig_xmax = int(nxmax * sx)
orig_ymax = int(nymax * sy)
orig_crop = img.crop((orig_xmin, orig_ymin, orig_xmax, orig_ymax))
orig_crop_resize = orig_crop.resize((nxmax - nxmin, nymax - nymin))
output_img = Image.new(img.mode, (self.output_w, self.output_h), color=(self.fill_color,)*channels)
output_img.paste(orig_crop_resize, (0, 0))
self.crop_info = [sx, sy, nxmin, nymin, nxmax, nymax]
return output_img
def _tf_anno(self, annos):
""" Change coordinates of an annotation, according to the previous crop """
sx, sy, crop_xmin, crop_ymin, crop_xmax, crop_ymax = self.crop_info
for i in range(len(annos)-1, -1, -1):
anno = annos[i]
x1 = max(crop_xmin, int(anno.x_top_left/sx))
x2 = min(crop_xmax, int((anno.x_top_left+anno.width)/sx))
y1 = max(crop_ymin, int(anno.y_top_left/sy))
y2 = min(crop_ymax, int((anno.y_top_left+anno.height)/sy))
w = x2-x1
h = y2-y1
if w <= 2 or h <= 2: # or w*h/(anno.width*anno.height/sx/sy) <= 0.5:
del annos[i]
continue
annos[i].x_top_left = x1 - crop_xmin
            annos[i].y_top_left = y1 - crop_ymin
annos[i].width = w
annos[i].height = h
return annos
class RandomFlip(BaseMultiTransform):
""" Randomly flip image.
Args:
threshold (Number [0-1]): Chance of flipping the image
Note:
Create 1 RandomFlip object and use it for both image and annotation transforms.
This object will save data from the image transform and use that on the annotation transform.
"""
def __init__(self, threshold):
self.threshold = threshold
self.flip = False
self.im_w = None
def __call__(self, data):
if data is None:
return None
elif isinstance(data, collections.Sequence):
return [self._tf_anno(anno) for anno in data]
elif isinstance(data, Image.Image):
return self._tf_pil(data)
elif isinstance(data, np.ndarray):
return self._tf_cv(data)
else:
log.error(f'RandomFlip only works with <brambox annotation lists>, <PIL images> or <OpenCV images> [{type(data)}]')
return data
def _tf_pil(self, img):
""" Randomly flip image """
self._get_flip()
self.im_w = img.size[0]
if self.flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def _tf_cv(self, img):
""" Randomly flip image """
self._get_flip()
self.im_w = img.shape[1]
if self.flip:
img = cv2.flip(img, 1)
return img
def _get_flip(self):
self.flip = random.random() < self.threshold
def _tf_anno(self, anno):
""" Change coordinates of an annotation, according to the previous flip """
if self.flip and self.im_w is not None:
anno.x_top_left = self.im_w - anno.x_top_left - anno.width
return anno
class HSVShift(BaseTransform):
""" Perform random HSV shift on the RGB data.
Args:
hue (Number): Random number between -hue,hue is used to shift the hue
        saturation (Number): Random number between 1,saturation is used to shift the saturation; 50% chance to get 1/dSaturation instead of dSaturation
        value (Number): Random number between 1,value is used to shift the value; 50% chance to get 1/dValue instead of dValue
Warning:
If you use OpenCV as your image processing library, make sure the image is RGB before using this transform.
By default OpenCV uses BGR, so you must use `cvtColor`_ function to transform it to RGB.
.. _cvtColor: https://docs.opencv.org/master/d7/d1b/group__imgproc__misc.html#ga397ae87e1288a81d2363b61574eb8cab
"""
def __init__(self, hue, saturation, value):
super().__init__(hue=hue, saturation=saturation, value=value)
@classmethod
def apply(cls, data, hue, saturation, value):
dh = random.uniform(-hue, hue)
ds = random.uniform(1, saturation)
if random.random() < 0.5:
ds = 1/ds
dv = random.uniform(1, value)
if random.random() < 0.5:
dv = 1/dv
if data is None:
return None
elif isinstance(data, Image.Image):
return cls._tf_pil(data, dh, ds, dv)
elif isinstance(data, np.ndarray):
return cls._tf_cv(data, dh, ds, dv)
else:
log.error(f'HSVShift only works with <PIL images> or <OpenCV images> [{type(data)}]')
return data
@staticmethod
def _tf_pil(img, dh, ds, dv):
""" Random hsv shift """
img = img.convert('HSV')
channels = list(img.split())
def change_hue(x):
x += int(dh * 255)
if x > 255:
x -= 255
elif x < 0:
                x += 255
return x
channels[0] = channels[0].point(change_hue)
channels[1] = channels[1].point(lambda i: min(255, max(0, int(i*ds))))
channels[2] = channels[2].point(lambda i: min(255, max(0, int(i*dv))))
img = Image.merge(img.mode, tuple(channels))
img = img.convert('RGB')
return img
@staticmethod
def _tf_cv(img, dh, ds, dv):
""" Random hsv shift """
img = img.astype(np.float32) / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def wrap_hue(x):
x[x >= 360.0] -= 360.0
x[x < 0.0] += 360.0
return x
        img[:, :, 0] = wrap_hue(img[:, :, 0] + (360.0 * dh))
img[:, :, 1] = np.clip(ds * img[:, :, 1], 0.0, 1.0)
img[:, :, 2] = np.clip(dv * img[:, :, 2], 0.0, 1.0)
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
img = (img * 255).astype(np.uint8)
return img
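# Illustrative sketch (not part of the original module): applying HSVShift to
# an image loaded with OpenCV.  As the class Warning notes, OpenCV loads
# images as BGR, so they must be converted to RGB first; the parameter values
# below are hypothetical.
def _example_hsvshift_on_cv2(path):
    bgr = cv2.imread(path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    shift = HSVShift(hue=0.1, saturation=1.5, value=1.5)
    return shift(rgb)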
class BramboxToTensor(BaseTransform):
""" Converts a list of brambox annotation objects to a tensor.
Args:
dimension (tuple, optional): Default size of the transformed images, expressed as a (width, height) tuple; Default **None**
dataset (lightnet.data.Dataset, optional): Dataset that uses this transform; Default **None**
max_anno (Number, optional): Maximum number of annotations in the list; Default **50**
class_label_map (list, optional): class label map to convert class names to an index; Default **None**
Return:
torch.Tensor: tensor of dimension [max_anno, 5] containing [class_idx,center_x,center_y,width,height] for every detection
Warning:
If no class_label_map is given, this function will first try to convert the class_label to an integer. If that fails, it is simply given the number 0.
"""
def __init__(self, dimension=None, dataset=None, max_anno=50, class_label_map=None):
super().__init__(dimension=dimension, dataset=dataset, max_anno=max_anno, class_label_map=class_label_map)
if self.dimension is None and self.dataset is None:
raise ValueError('This transform either requires a dimension or a dataset to infer the dimension')
if self.class_label_map is None:
log.warn('No class_label_map given. If the class_labels are not integers, they will be set to zero.')
def __call__(self, data):
if self.dataset is not None:
dim = self.dataset.input_dim
else:
dim = self.dimension
return self.apply(data, dim, self.max_anno, self.class_label_map)
@classmethod
def apply(cls, data, dimension, max_anno=None, class_label_map=None):
if not isinstance(data, collections.Sequence):
raise TypeError(f'BramboxToTensor only works with <brambox annotation list> [{type(data)}]')
anno_np = np.array([cls._tf_anno(anno, dimension, class_label_map) for anno in data], dtype=np.float32)
if max_anno is not None:
anno_len = len(data)
if anno_len > max_anno:
raise ValueError(f'More annotations than maximum allowed [{anno_len}/{max_anno}]')
z_np = np.zeros((max_anno-anno_len, 5), dtype=np.float32)
z_np[:, 0] = -1
if anno_len > 0:
return torch.from_numpy(np.concatenate((anno_np, z_np)))
else:
return torch.from_numpy(z_np)
else:
return torch.from_numpy(anno_np)
@staticmethod
def _tf_anno(anno, dimension, class_label_map):
""" Transforms brambox annotation to list """
net_w, net_h = dimension
if class_label_map is not None:
cls = class_label_map.index(anno.class_label)
else:
try:
cls = int(anno.class_label)
except ValueError:
cls = 0
cx = (anno.x_top_left + (anno.width / 2)) / net_w
cy = (anno.y_top_left + (anno.height / 2)) / net_h
w = anno.width / net_w
h = anno.height / net_h
return [cls, cx, cy, w, h]
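# Illustrative sketch (not part of the original module): a typical training
# pipeline chains the flip/crop/HSV transforms on the image and then reuses
# the same RandomFlip and RandomCropLetterbox objects on the annotations
# before converting them to a tensor.  ``dataset`` is a hypothetical
# lightnet-style dataset exposing ``input_dim``.
def _example_train_pipeline(dataset, pil_image, annotations):
    flip = RandomFlip(threshold=0.5)
    crop = RandomCropLetterbox(dataset, jitter=0.3)
    hsv = HSVShift(hue=0.1, saturation=1.5, value=1.5)
    to_tensor = BramboxToTensor(dataset=dataset)
    image = hsv(crop(flip(pil_image)))   # image first: stores flip/crop state
    annos = crop(flip(annotations))      # annotations reuse the stored state
    target = to_tensor(annos)
    return image, target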
| mit |
caronc/nzb-subliminal | Subliminal/guessit/transfo/guess_episode_details.py | 7 | 2809 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import found_guess
from guessit.containers import PropertiesContainer
import itertools
class GuessEpisodeDetails(Transformer):
def __init__(self):
Transformer.__init__(self, -205)
self.container = PropertiesContainer()
self.container.register_property('episodeDetails', 'Special', 'Bonus', 'Omake', 'Ova', 'Oav', 'Pilot', 'Unaired')
self.container.register_property('episodeDetails', 'Extras?', canonical_form='Extras')
def guess_details(self, string, node=None, options=None):
properties = self.container.find_properties(string, node, options, 'episodeDetails', multiple=True)
guesses = self.container.as_guess(properties, multiple=True)
return guesses
def second_pass_options(self, mtree, options=None):
if not mtree.guess.get('type', '').startswith('episode'):
for unidentified_leaf in mtree.unidentified_leaves():
properties = self.container.find_properties(unidentified_leaf.value, unidentified_leaf, options, 'episodeDetails')
guess = self.container.as_guess(properties)
if guess:
return {'type': 'episode'}
return None
def supported_properties(self):
return self.container.get_supported_properties()
def process(self, mtree, options=None):
if (mtree.guess.get('type', '').startswith('episode') and
(not mtree.info.get('episodeNumber') or
mtree.info.get('season') == 0)):
for leaf in itertools.chain(mtree.leaves_containing('title'),
mtree.unidentified_leaves()):
guesses = self.guess_details(leaf.value, leaf, options)
for guess in guesses:
found_guess(leaf, guess, update_guess=False)
return None
| gpl-3.0 |
luo66/scikit-learn | examples/mixture/plot_gmm_sin.py | 247 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to find relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
automl/auto-sklearn | examples/80_extending/example_extending_regression.py | 1 | 5223 | """
================================================
Extending Auto-Sklearn with Regression Component
================================================
The following example demonstrates how to create a new regression
component for use in auto-sklearn.
"""
from typing import Optional
from pprint import pprint
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
CategoricalHyperparameter,
)
from ConfigSpace.conditions import EqualsCondition
import sklearn.metrics
from autosklearn.askl_typing import FEAT_TYPE_TYPE
import autosklearn.regression
import autosklearn.pipeline.components.regression
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import (
SPARSE,
DENSE,
SIGNED_DATA,
UNSIGNED_DATA,
PREDICTIONS,
)
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
############################################################################
# Implement kernel ridge regression component for auto-sklearn
# ============================================================
class KernelRidgeRegression(AutoSklearnRegressionAlgorithm):
def __init__(self, alpha, kernel, gamma, degree, coef0, random_state=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.random_state = random_state
self.estimator = None
def fit(self, X, y):
self.alpha = float(self.alpha)
self.gamma = float(self.gamma)
self.degree = int(self.degree)
self.coef0 = float(self.coef0)
import sklearn.kernel_ridge
self.estimator = sklearn.kernel_ridge.KernelRidge(
alpha=self.alpha,
kernel=self.kernel,
gamma=self.gamma,
degree=self.degree,
coef0=self.coef0,
)
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "KRR",
"name": "Kernel Ridge Regression",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": True,
"is_deterministic": True,
"input": (SPARSE, DENSE, UNSIGNED_DATA, SIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
alpha = UniformFloatHyperparameter(
name="alpha", lower=10**-5, upper=1, log=True, default_value=1.0
)
kernel = CategoricalHyperparameter(
name="kernel",
# We restrict ourselves to two possible kernels for this example
choices=["polynomial", "rbf"],
default_value="polynomial",
)
gamma = UniformFloatHyperparameter(
name="gamma", lower=0.00001, upper=1, default_value=0.1, log=True
)
degree = UniformIntegerHyperparameter(
name="degree", lower=2, upper=5, default_value=3
)
coef0 = UniformFloatHyperparameter(
name="coef0",
lower=1e-2,
upper=1e2,
log=True,
default_value=1,
)
cs.add_hyperparameters([alpha, kernel, gamma, degree, coef0])
degree_condition = EqualsCondition(degree, kernel, "polynomial")
coef0_condition = EqualsCondition(coef0, kernel, "polynomial")
cs.add_conditions([degree_condition, coef0_condition])
return cs
# Add KRR component to auto-sklearn.
autosklearn.pipeline.components.regression.add_regressor(KernelRidgeRegression)
cs = KernelRidgeRegression.get_hyperparameter_search_space()
print(cs)
############################################################################
# Generate data
# =============
X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
############################################################################
# Fit the model using KRR
# =======================
reg = autosklearn.regression.AutoSklearnRegressor(
time_left_for_this_task=30,
per_run_time_limit=10,
include={"regressor": ["KernelRidgeRegression"]},
    # The two flags below are provided to speed up calculations
# Not recommended for a real implementation
initial_configurations_via_metalearning=0,
smac_scenario_args={"runcount_limit": 5},
)
reg.fit(X_train, y_train)
############################################################################
# Print prediction score and statistics
# =====================================
y_pred = reg.predict(X_test)
print("r2 score: ", sklearn.metrics.r2_score(y_pred, y_test))
pprint(reg.show_models(), indent=4)
| bsd-3-clause |
GauthamGoli/quantify-2016 | Machine Learning - Bond Liquidity Prediction/final_code.py | 1 | 5973 | # importing various modules that would be required in the program
import pandas as pd
import numpy as np
import dateutil.parser as dateparser
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import Imputer, LabelEncoder, StandardScaler
from statsmodels.regression.mixed_linear_model import MixedLM
from patsy import dmatrices
# Preprocessing the ML_Bond_metadata.csv file after initial treatment in excel.
data = pd.read_csv('ML_Bond_metadata_corrected_dates.csv')
# print data.isnull().sum()
numerical_fields = [
'coupon',
'amtIssued',
'amtOutstanding',
]
categorical_fields = [
'issuer',
'Market',
'collateralType',
    'couponFrequency',
'couponType',
'industryGroup',
'industrySector',
'industrySubGroup',
'maturityType',
'securityType',
'paymentRank',
'144aflag',
'ratingAgency1Rating',
'ratingAgency2Rating',
'ratingAgency1Watch',
'ratingAgency2Watch'
]
date_fields = [
'issueDate',
'maturity',
'ratingAgency1EffectiveDate',
'ratingAgency2EffectiveDate'
]
# The difference in the amounts is a new feature added to the data to give better insights
data['AmtDiff'] = data['amtIssued'] - data['amtOutstanding']
# The duration between issue and maturity
data['DateDiff'] = data['maturity'] - data['issueDate']
# Imputing values in the columns where NANs are found
for i in ['issueDate', 'maturity', 'ratingAgency1EffectiveDate', 'ratingAgency2EffectiveDate']:
    data[i] = data[i].fillna(data[i].median())
# Changing the value of couponFrequency from NAN to 0 if coupon is also zero
temp = []
for i in range(len(data)):
    if data['coupon'].iloc[i] == 0.00:
        temp.append(i)
for i in temp:
    data.set_value(i, 'couponFrequency', 0)
# For Cleaning Categorical Data
for i in ['couponFrequency']:
data[i] = LabelEncoder().fit_transform(data[i])
# For Cleaning Numeric Data
for i in ['AmtDiff','DateDiff','amtOutstanding', 'amtIssued']:
data[i] = StandardScaler().fit_transform(data[i])
data.to_csv('metadata_clean.csv')
# functions which will be used for cleaning the dataset.csv file
def get_cluster(x):
    # Map the numeric cluster label 0-9 to a letter 'A'-'J'
    return chr(ord('A') + x)
# Starting of the training time i.e. 16 March 2016 is considered as the epoch
def get_days(x):
dt = datetime.strptime(x, "%d%b%Y")
epoch = datetime(2016,3,16)
return int((dt-epoch).total_seconds()/86400)
def get_time(x):
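    # Handle both 24-hour timestamps (no am/pm marker) and 12-hour ones, and
    # return the whole number of seconds elapsed since the 16 March 2016 epoch.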
if x.find('pm') == -1 and x.find('am') == -1:
dt = datetime.strptime(x[:26], "%a %d%b%Y %H:%M:%S.%f")
else:
dt = datetime.strptime(x, "%a %d%b%Y %I:%M:%S.%f %p")
epoch = datetime(2016,3,16)
diff = dt - epoch
return int(diff.total_seconds())
def get_side_back(x):
if x == 0:
return 'S'
elif x == 1:
return 'B'
def get_isin(x):
return int(x[4:])
def correct_time(x):
return x[:9]+'20'+x[9:]
# Clustering of the Bonds
# importing the module containg the python implementation of k-ptototype algorithm
from kmodes import kprototypes
X = data.as_matrix()
kproto = kprototypes.KPrototypes(n_clusters=10, init='Cao', verbose=2)
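# The indices passed to ``categorical`` below are the column positions of the
# categorical fields in the matrix X; the remaining columns are treated as
# numerical by the k-prototypes distance.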
clusters = kproto.fit_predict(X, categorical=[0, 2, 5, 6, 7, 8, 9, 10, 11 ,12, 13, 14, 15, 16, 18, 19])
# New column has been created in the dataset for clusters
data['cluster'] = clusters
# saving the results to a csv file
data.to_csv('metadata_clean_cluster_10.csv')
# creating a temporary dataframe
temp_df = pd.DataFrame(columns=['isin','cluster'])
temp_df['isin'] = data['isin']
temp_df['cluster'] = data['cluster']
data1 = pd.read_csv('dataset.csv')
# price could not be modelled
data1 = data1.drop(['price'], axis=1)
data1['isin'] = data1['isin'].apply(get_isin)
data1['time'] = data1['time'].apply(correct_time)
data1['time'] = data1['time'].apply(get_time)
data1['date'] = data1['date'].apply(get_days)
data1 = data1.drop(['time'],axis=1)
# sequence to compress the various trades in a day into one single entry
isins = data1['isin'].unique()
res = pd.DataFrame(columns=['isin', 'volume', 'side', 'date'])
for i in isins:
temp = data1[data1['isin'] == i]
temp_dates = temp['date'].unique()
for j in temp_dates:
temp1 = temp[temp['date'] == j]
temp_side = temp1['side'].unique()
for k in temp_side:
temp2 = temp1[temp1['side'] == k]
sum_vol = temp2['volume'].sum()
res1 = pd.DataFrame([[i,sum_vol,k,j]],columns=['isin','volume','side','date'])
res = res.append(res1)
data1 = res
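# Hedged alternative (not wired into the pipeline): the per-day compression
# above can be written as a single pandas groupby, which is much faster than
# the nested loops.
def aggregate_daily_volume(df):
    # Sum the traded volume for every (isin, date, side) combination
    return df.groupby(['isin', 'date', 'side'], as_index=False)['volume'].sum()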
# merge tables to include cluster data in the dataset also
data1 = data1.merge(temp_df, on='isin', how='left')
data1.to_csv('dataset_clean_cluster10.csv')
data1['cluster'] = data1['cluster'].apply(get_cluster)
# dividing the data into training and validation test
data1['is_t_data'] = np.random.uniform(0, 1, len(data1)) <= 0.75
train, validate = data1[data1['is_t_data']==True], data1[data1['is_t_data']==False]
# for the purpose of validation tests
train = train.drop(['is_t_data'],axis=1)
validate = validate.drop(['is_t_data'],axis=1)
model = MixedLM.from_formula('volume ~ side + date + cluster', data=train, re_formula='date', groups=train['isin'])
result = model.fit()
print(result.summary())
# template was created for easy access of output parameters
out = pd.read_csv('output_template.csv')
out['dummy'] = 0
dummy, X = dmatrices('dummy ~ side + date - 1', data=out, return_type='dataframe')
values = result.predict(X)
# values are returned as a numpy ndarray and, after careful addition in Excel, the csv file is uploaded.
| mit |
MohammedWasim/scikit-learn | sklearn/neighbors/base.py | 71 | 31147 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
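# Illustrative sketch (not part of the scikit-learn API): with
# weights='distance' the helper simply inverts the distances, so closer
# neighbors receive larger weights; e.g. distances [[1., 2., 4.]] become
# weights [[1., 0.5, 0.25]], and a zero distance gives that neighbor weight
# 1.0 and every other neighbor 0.0.
def _example_distance_weights():
    return _get_weights(np.array([[1.0, 2.0, 4.0]]), 'distance')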
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
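        # A_indptr[i]:A_indptr[i + 1] delimits the entries of row i in A_ind
        # and A_data, which is the layout the CSR constructor expects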
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
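        # "and" binds tighter than "or": this accepts a 1d y or a
        # single-column 2d y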
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
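        # Encode each output column as integer indices into its sorted unique
        # classes; the class labels themselves are kept in self.classes_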
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
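# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: it shows how the
# object arrays returned by ``radius_neighbors`` can be consumed (see the
# Notes section of that method).  The guard keeps imports of this module
# unaffected; the underscored names are ours, added only for this example.
if __name__ == "__main__":
    import numpy as np
    from sklearn.neighbors import NearestNeighbors

    _samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
    _nn = NearestNeighbors(radius=1.6).fit(_samples)
    _dist, _ind = _nn.radius_neighbors([[1., 1., 1.], [0., 0., 0.]])
    for _d, _i in zip(_dist, _ind):
        # Each entry is a 1D array whose length depends on the query point.
        print("indices %s distances %s" % (_i, np.round(_d, 3)))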
| bsd-3-clause |
sgenoud/scikit-learn | examples/plot_digits_pipe.py | 5 | 1781 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print __doc__
# Code source: Gaël Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import linear_model, decomposition, datasets
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
from sklearn.pipeline import Pipeline
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.axes([.2, .2, .7, .7])
pl.plot(pca.explained_variance_, linewidth=2)
pl.axis('tight')
pl.xlabel('n_components')
pl.ylabel('explained_variance_')
###############################################################################
# Prediction
from sklearn.grid_search import GridSearchCV
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using '__' separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
pl.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
pl.legend(prop=dict(size=12))
pl.show()
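# Optional follow-up (our addition, not part of the original example): report
# the hyper-parameters selected by the grid search, using only the public
# attributes already exercised above.
print('Best n_components: %d'
      % estimator.best_estimator_.named_steps['pca'].n_components)
print('Best C: %g'
      % estimator.best_estimator_.named_steps['logistic'].C)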
| bsd-3-clause |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.8/resnet-tpuv2-8/code/resnet/model/models/official/transformer/data_download.py | 4 | 14804 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and preprocess WMT17 ende training and evaluation datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tarfile
# pylint: disable=g-bad-import-order
import six
from six.moves import urllib
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.transformer.utils import tokenizer
from official.utils.flags import core as flags_core
# Data sources for training/evaluating the transformer translation model.
# If any of the training sources are changed, then either:
# 1) use the flag `--search` to find the best min count or
# 2) update the _TRAIN_DATA_MIN_COUNT constant.
# min_count is the minimum number of times a token must appear in the data
# before it is added to the vocabulary. "Best min count" refers to the value
# that generates a vocabulary set that is closest in size to _TARGET_VOCAB_SIZE.
_TRAIN_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/"
"training-parallel-nc-v12.tgz",
"input": "news-commentary-v12.de-en.en",
"target": "news-commentary-v12.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
"input": "commoncrawl.de-en.en",
"target": "commoncrawl.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
"input": "europarl-v7.de-en.en",
"target": "europarl-v7.de-en.de",
},
]
# Use pre-defined minimum count to generate subtoken vocabulary.
_TRAIN_DATA_MIN_COUNT = 6
_EVAL_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"input": "newstest2013.en",
"target": "newstest2013.de",
}
]
# Vocabulary constants
_TARGET_VOCAB_SIZE = 32768 # Number of subtokens in the vocabulary list.
_TARGET_THRESHOLD = 327 # Accept vocabulary if size is within this threshold
VOCAB_FILE = "vocab.ende.%d" % _TARGET_VOCAB_SIZE
# Strings to include in the generated files.
_PREFIX = "wmt32k"
_TRAIN_TAG = "train"
_EVAL_TAG = "dev" # Following WMT and Tensor2Tensor conventions, in which the
# evaluation datasets are tagged as "dev" for development.
# Number of files to split train and evaluation data
_TRAIN_SHARDS = 100
_EVAL_SHARDS = 1
def find_file(path, filename, max_depth=5):
"""Returns full filepath if the file is in path or a subdirectory."""
for root, dirs, files in os.walk(path):
if filename in files:
return os.path.join(root, filename)
# Don't search past max_depth
depth = root[len(path) + 1:].count(os.sep)
if depth > max_depth:
del dirs[:] # Clear dirs
return None
###############################################################################
# Download and extraction functions
###############################################################################
def get_raw_files(raw_dir, data_source):
"""Return raw files from source. Downloads/extracts if needed.
Args:
raw_dir: string directory to store raw files
data_source: dictionary with
{"url": url of compressed dataset containing input and target files
"input": file with data in input language
"target": file with data in target language}
Returns:
dictionary with
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
"""
raw_files = {
"inputs": [],
"targets": [],
} # keys
for d in data_source:
input_file, target_file = download_and_extract(
raw_dir, d["url"], d["input"], d["target"])
raw_files["inputs"].append(input_file)
raw_files["targets"].append(target_file)
return raw_files
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
"""
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r")
def download_from_url(path, url):
"""Download content from a url.
Args:
path: string directory where file will be downloaded
url: string url
Returns:
Full path to downloaded file
"""
filename = url.split("/")[-1]
found_file = find_file(path, filename, max_depth=0)
if found_file is None:
filename = os.path.join(path, filename)
tf.logging.info("Downloading from %s to %s." % (url, filename))
inprogress_filepath = filename + ".incomplete"
inprogress_filepath, _ = urllib.request.urlretrieve(
url, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress.
print()
tf.gfile.Rename(inprogress_filepath, filename)
return filename
else:
tf.logging.info("Already downloaded: %s (at %s)." % (url, found_file))
return found_file
def download_and_extract(path, url, input_filename, target_filename):
"""Extract files from downloaded compressed archive file.
Args:
path: string directory where the files will be downloaded
url: url containing the compressed input and target files
input_filename: name of file containing data in source language
target_filename: name of file containing data in target language
Returns:
Full paths to extracted input and target files.
Raises:
    OSError: if the download/extraction fails.
"""
# Check if extracted files already exist in path
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
tf.logging.info("Already downloaded and extracted %s." % url)
return input_file, target_file
# Download archive file if it doesn't already exist.
compressed_file = download_from_url(path, url)
# Extract compressed files
tf.logging.info("Extracting %s." % compressed_file)
with tarfile.open(compressed_file, "r:gz") as corpus_tar:
corpus_tar.extractall(path)
# Return filepaths of the requested files.
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
return input_file, target_file
raise OSError("Download/extraction failed for url %s to path %s" %
(url, path))
def txt_line_iterator(path):
"""Iterate through lines of file."""
with tf.gfile.Open(path) as f:
for line in f:
yield line.strip()
def compile_files(raw_dir, raw_files, tag):
"""Compile raw files into a single file for each language.
Args:
raw_dir: Directory containing downloaded raw files.
raw_files: Dict containing filenames of input and target data.
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
tag: String to append to the compiled filename.
Returns:
Full path of compiled input and target files.
"""
tf.logging.info("Compiling files with tag %s." % tag)
filename = "%s-%s" % (_PREFIX, tag)
input_compiled_file = os.path.join(raw_dir, filename + ".lang1")
target_compiled_file = os.path.join(raw_dir, filename + ".lang2")
with tf.gfile.Open(input_compiled_file, mode="w") as input_writer:
with tf.gfile.Open(target_compiled_file, mode="w") as target_writer:
for i in range(len(raw_files["inputs"])):
input_file = raw_files["inputs"][i]
target_file = raw_files["targets"][i]
tf.logging.info("Reading files %s and %s." % (input_file, target_file))
write_file(input_writer, input_file)
write_file(target_writer, target_file)
return input_compiled_file, target_compiled_file
def write_file(writer, filename):
"""Write all of lines from file using the writer."""
for line in txt_line_iterator(filename):
writer.write(line)
writer.write("\n")
###############################################################################
# Data preprocessing
###############################################################################
def encode_and_save_files(
subtokenizer, data_dir, raw_files, tag, total_shards):
"""Save data from files as encoded Examples in TFrecord format.
Args:
subtokenizer: Subtokenizer object that will be used to encode the strings.
data_dir: The directory in which to write the examples
raw_files: A tuple of (input, target) data files. Each line in the input and
the corresponding line in target file will be saved in a tf.Example.
tag: String that will be added onto the file names.
total_shards: Number of files to divide the data into.
Returns:
List of all files produced.
"""
# Create a file for each shard.
filepaths = [shard_filename(data_dir, tag, n + 1, total_shards)
for n in range(total_shards)]
if all_exist(filepaths):
tf.logging.info("Files with tag %s already exist." % tag)
return filepaths
tf.logging.info("Saving files with tag %s." % tag)
input_file = raw_files[0]
target_file = raw_files[1]
# Write examples to each shard in round robin order.
tmp_filepaths = [fname + ".incomplete" for fname in filepaths]
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filepaths]
counter, shard = 0, 0
for counter, (input_line, target_line) in enumerate(zip(
txt_line_iterator(input_file), txt_line_iterator(target_file))):
if counter > 0 and counter % 100000 == 0:
tf.logging.info("\tSaving case %d." % counter)
example = dict_to_example(
{"inputs": subtokenizer.encode(input_line, add_eos=True),
"targets": subtokenizer.encode(target_line, add_eos=True)})
writers[shard].write(example.SerializeToString())
shard = (shard + 1) % total_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filepaths, filepaths):
tf.gfile.Rename(tmp_name, final_name)
tf.logging.info("Saved %d Examples", counter + 1)
return filepaths
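# Hedged, standalone sketch (not used by the pipeline): the round-robin shard
# assignment performed in encode_and_save_files above, isolated so the writer
# rotation is easy to follow.  _demo_round_robin is a hypothetical helper added
# here for illustration only, e.g. _demo_round_robin(7, 3) == [0, 1, 2, 0, 1, 2, 0].
def _demo_round_robin(n_examples, total_shards):
  """Return the shard index each example would be written to."""
  assignment = []
  shard = 0
  for _ in range(n_examples):
    assignment.append(shard)
    shard = (shard + 1) % total_shards
  return assignment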
def shard_filename(path, tag, shard_num, total_shards):
"""Create filename for data shard."""
return os.path.join(
path, "%s-%s-%.5d-of-%.5d" % (_PREFIX, tag, shard_num, total_shards))
def shuffle_records(fname):
"""Shuffle records in a single file."""
tf.logging.info("Shuffling records in file %s" % fname)
# Rename file prior to shuffling
tmp_fname = fname + ".unshuffled"
tf.gfile.Rename(fname, tmp_fname)
reader = tf.python_io.tf_record_iterator(tmp_fname)
records = []
for record in reader:
records.append(record)
if len(records) % 100000 == 0:
tf.logging.info("\tRead: %d", len(records))
random.shuffle(records)
# Write shuffled records to original file name
with tf.python_io.TFRecordWriter(fname) as w:
for count, record in enumerate(records):
w.write(record)
if count > 0 and count % 100000 == 0:
tf.logging.info("\tWriting record: %d" % count)
tf.gfile.Remove(tmp_fname)
def dict_to_example(dictionary):
"""Converts a dictionary of string->int to a tf.Example."""
features = {}
for k, v in six.iteritems(dictionary):
features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
return tf.train.Example(features=tf.train.Features(feature=features))
def all_exist(filepaths):
"""Returns true if all files in the list exist."""
for fname in filepaths:
if not tf.gfile.Exists(fname):
return False
return True
def make_dir(path):
if not tf.gfile.Exists(path):
tf.logging.info("Creating directory %s" % path)
tf.gfile.MakeDirs(path)
def main(unused_argv):
"""Obtain training and evaluation data for the Transformer model."""
make_dir(FLAGS.raw_dir)
make_dir(FLAGS.data_dir)
# Get paths of download/extracted training and evaluation files.
tf.logging.info("Step 1/4: Downloading data from source")
train_files = get_raw_files(FLAGS.raw_dir, _TRAIN_DATA_SOURCES)
eval_files = get_raw_files(FLAGS.raw_dir, _EVAL_DATA_SOURCES)
# Create subtokenizer based on the training files.
tf.logging.info("Step 2/4: Creating subtokenizer and building vocabulary")
train_files_flat = train_files["inputs"] + train_files["targets"]
vocab_file = os.path.join(FLAGS.data_dir, VOCAB_FILE)
subtokenizer = tokenizer.Subtokenizer.init_from_files(
vocab_file, train_files_flat, _TARGET_VOCAB_SIZE, _TARGET_THRESHOLD,
min_count=None if FLAGS.search else _TRAIN_DATA_MIN_COUNT)
tf.logging.info("Step 3/4: Compiling training and evaluation data")
compiled_train_files = compile_files(FLAGS.raw_dir, train_files, _TRAIN_TAG)
compiled_eval_files = compile_files(FLAGS.raw_dir, eval_files, _EVAL_TAG)
# Tokenize and save data as Examples in the TFRecord format.
tf.logging.info("Step 4/4: Preprocessing and saving data")
train_tfrecord_files = encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_train_files, _TRAIN_TAG,
_TRAIN_SHARDS)
encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_eval_files, _EVAL_TAG,
_EVAL_SHARDS)
for fname in train_tfrecord_files:
shuffle_records(fname)
def define_data_download_flags():
"""Add flags specifying data download arguments."""
flags.DEFINE_string(
name="data_dir", short_name="dd", default="/tmp/translate_ende",
help=flags_core.help_wrap(
"Directory for where the translate_ende_wmt32k dataset is saved."))
flags.DEFINE_string(
name="raw_dir", short_name="rd", default="/tmp/translate_ende_raw",
help=flags_core.help_wrap(
"Path where the raw data will be downloaded and extracted."))
flags.DEFINE_bool(
name="search", default=False,
help=flags_core.help_wrap(
"If set, use binary search to find the vocabulary set with size"
"closest to the target size (%d)." % _TARGET_VOCAB_SIZE))
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
define_data_download_flags()
FLAGS = flags.FLAGS
absl_app.run(main)
| apache-2.0 |
mlflow/mlflow | tests/utils/test_requirements_utils.py | 1 | 13714 | import os
import importlib
from unittest import mock
import importlib_metadata
import pytest
import mlflow
import mlflow.utils.requirements_utils
from mlflow.utils.requirements_utils import (
_is_comment,
_is_empty,
_is_requirements_file,
_strip_inline_comment,
_join_continued_lines,
_parse_requirements,
_prune_packages,
_strip_local_version_label,
_get_installed_version,
_get_pinned_requirement,
_infer_requirements,
_normalize_package_name,
_PyPIPackageIndex,
)
def test_is_comment():
assert _is_comment("# comment")
assert _is_comment("#")
assert _is_comment("### comment ###")
assert not _is_comment("comment")
assert not _is_comment("")
def test_is_empty():
assert _is_empty("")
assert not _is_empty(" ")
assert not _is_empty("a")
def test_is_requirements_file():
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("--requirement req.txt")
assert _is_requirements_file("--requirement req.txt")
assert not _is_requirements_file("req")
def test_strip_inline_comment():
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # com1 # com2") == "aaa"
# Ensure a URI fragment is not stripped
assert (
_strip_inline_comment("git+https://git/repo.git#subdirectory=subdir")
== "git+https://git/repo.git#subdirectory=subdir"
)
def test_join_continued_lines():
assert list(_join_continued_lines(["a"])) == ["a"]
assert list(_join_continued_lines(["a\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b\\", "c"])) == ["abc"]
assert list(_join_continued_lines(["a\\", " b"])) == ["a b"]
assert list(_join_continued_lines(["a\\", " b\\", " c"])) == ["a b c"]
assert list(_join_continued_lines(["a\\", "\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b", "c\\", "d"])) == ["ab", "cd"]
assert list(_join_continued_lines(["a\\", "", "b"])) == ["a", "b"]
assert list(_join_continued_lines(["a\\"])) == ["a"]
assert list(_join_continued_lines(["\\", "a"])) == ["a"]
def test_parse_requirements(request, tmpdir):
"""
Ensures `_parse_requirements` returns the same result as `pip._internal.req.parse_requirements`
"""
from pip._internal.req import parse_requirements as pip_parse_requirements
from pip._internal.network.session import PipSession
root_req_src = """
# No version specifier
noverspec
no-ver-spec
# Version specifiers
verspec<1.0
ver-spec == 2.0
# Environment marker
env-marker; python_version < "3.8"
inline-comm # Inline comment
inlinecomm # Inline comment
# Git URIs
git+https://github.com/git/uri
git+https://github.com/sub/dir#subdirectory=subdir
# Requirements files
-r {relative_req}
--requirement {absolute_req}
# Constraints files
-c {relative_con}
--constraint {absolute_con}
# Line continuation
line-cont\
==\
1.0
# Line continuation with spaces
line-cont-space \
== \
1.0
# Line continuation with a blank line
line-cont-blank\
# Line continuation at EOF
line-cont-eof\
""".strip()
try:
os.chdir(tmpdir)
root_req = tmpdir.join("requirements.txt")
# Requirements files
rel_req = tmpdir.join("relative_req.txt")
abs_req = tmpdir.join("absolute_req.txt")
# Constraints files
rel_con = tmpdir.join("relative_con.txt")
abs_con = tmpdir.join("absolute_con.txt")
# pip's requirements parser collapses an absolute requirements file path:
# https://github.com/pypa/pip/issues/10121
# As a workaround, use a relative path on Windows.
absolute_req = abs_req.basename if os.name == "nt" else abs_req.strpath
absolute_con = abs_con.basename if os.name == "nt" else abs_con.strpath
root_req.write(
root_req_src.format(
relative_req=rel_req.basename,
absolute_req=absolute_req,
relative_con=rel_con.basename,
absolute_con=absolute_con,
)
)
rel_req.write("rel-req-xxx\nrel-req-yyy")
abs_req.write("abs-req-zzz")
rel_con.write("rel-con-xxx\nrel-con-yyy")
abs_con.write("abs-con-zzz")
expected_cons = [
"rel-con-xxx",
"rel-con-yyy",
"abs-con-zzz",
]
expected_reqs = [
"noverspec",
"no-ver-spec",
"verspec<1.0",
"ver-spec == 2.0",
'env-marker; python_version < "3.8"',
"inline-comm",
"inlinecomm",
"git+https://github.com/git/uri",
"git+https://github.com/sub/dir#subdirectory=subdir",
"rel-req-xxx",
"rel-req-yyy",
"abs-req-zzz",
"line-cont==1.0",
"line-cont-space == 1.0",
"line-cont-blank",
"line-cont-eof",
]
parsed_reqs = list(_parse_requirements(root_req.basename, is_constraint=False))
pip_reqs = list(pip_parse_requirements(root_req.basename, session=PipSession()))
# Requirements
assert [r.req_str for r in parsed_reqs if not r.is_constraint] == expected_reqs
assert [r.requirement for r in pip_reqs if not r.constraint] == expected_reqs
# Constraints
assert [r.req_str for r in parsed_reqs if r.is_constraint] == expected_cons
assert [r.requirement for r in pip_reqs if r.constraint] == expected_cons
finally:
os.chdir(request.config.invocation_dir)
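# The expected behaviour asserted below matches PEP 503 name normalization:
# lowercase the name and collapse runs of "-", "_" and "." into a single "-".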
def test_normalize_package_name():
assert _normalize_package_name("abc") == "abc"
assert _normalize_package_name("ABC") == "abc"
assert _normalize_package_name("a-b-c") == "a-b-c"
assert _normalize_package_name("a.b.c") == "a-b-c"
assert _normalize_package_name("a_b_c") == "a-b-c"
assert _normalize_package_name("a--b--c") == "a-b-c"
assert _normalize_package_name("a-._b-._c") == "a-b-c"
def test_prune_packages():
assert _prune_packages(["mlflow"]) == {"mlflow"}
assert _prune_packages(["mlflow", "packaging"]) == {"mlflow"}
assert _prune_packages(["mlflow", "scikit-learn"]) == {"mlflow"}
def test_capture_imported_modules():
from mlflow.utils._capture_modules import _CaptureImportedModules
with _CaptureImportedModules() as cap:
# pylint: disable=unused-import
import math
__import__("pandas")
importlib.import_module("numpy")
assert "math" in cap.imported_modules
assert "pandas" in cap.imported_modules
assert "numpy" in cap.imported_modules
def test_strip_local_version_label():
assert _strip_local_version_label("1.2.3") == "1.2.3"
assert _strip_local_version_label("1.2.3+ab") == "1.2.3"
assert _strip_local_version_label("1.2.3rc0+ab") == "1.2.3rc0"
assert _strip_local_version_label("1.2.3.dev0+ab") == "1.2.3.dev0"
assert _strip_local_version_label("1.2.3.post0+ab") == "1.2.3.post0"
assert _strip_local_version_label("invalid") == "invalid"
def test_get_installed_version(tmpdir, monkeypatch):
import numpy as np
import pandas as pd
import sklearn
assert _get_installed_version("mlflow") == mlflow.__version__
assert _get_installed_version("numpy") == np.__version__
assert _get_installed_version("pandas") == pd.__version__
assert _get_installed_version("scikit-learn", module="sklearn") == sklearn.__version__
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
monkeypatch.syspath_prepend(tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError, match=r".+"):
importlib_metadata.version("not_found")
assert _get_installed_version("not_found") == "1.2.3"
def test_get_pinned_requirement(tmpdir, monkeypatch):
assert _get_pinned_requirement("mlflow") == f"mlflow=={mlflow.__version__}"
assert _get_pinned_requirement("mlflow", version="1.2.3") == "mlflow==1.2.3"
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
monkeypatch.syspath_prepend(tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError, match=r".+"):
importlib_metadata.version("not_found")
assert _get_pinned_requirement("not_found") == "not_found==1.2.3"
def test_get_pinned_requirement_local_version_label(tmpdir, monkeypatch):
package = tmpdir.join("my_package.py")
lvl = "abc.def.ghi" # Local version label
package.write(f"__version__ = '1.2.3+{lvl}'")
monkeypatch.syspath_prepend(tmpdir.strpath)
with mock.patch("mlflow.utils.requirements_utils._logger.warning") as mock_warning:
req = _get_pinned_requirement("my_package")
mock_warning.assert_called_once()
(first_pos_arg,) = mock_warning.call_args[0]
assert first_pos_arg.startswith(
f"Found my_package version (1.2.3+{lvl}) contains a local version label (+{lvl})."
)
assert req == "my_package==1.2.3"
def test_infer_requirements_excludes_mlflow():
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=["mlflow", "pytest"],
):
mlflow_package = "mlflow-skinny" if "MLFLOW_SKINNY" in os.environ else "mlflow"
assert mlflow_package in importlib_metadata.packages_distributions()["mlflow"]
assert _infer_requirements("path/to/model", "sklearn") == [f"pytest=={pytest.__version__}"]
def test_infer_requirements_prints_warning_for_unrecognized_packages():
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=["sklearn"],
), mock.patch(
"mlflow.utils.requirements_utils._PYPI_PACKAGE_INDEX",
_PyPIPackageIndex(date="2022-01-01", package_names=set()),
), mock.patch(
"mlflow.utils.requirements_utils._logger.warning"
) as mock_warning:
_infer_requirements("path/to/model", "sklearn")
mock_warning.assert_called_once()
warning_template = mock_warning.call_args[0][0]
date, unrecognized_packages = mock_warning.call_args[0][1:3]
warning_text = warning_template % (date, unrecognized_packages)
assert "not found in the public PyPI package index" in warning_text
assert "scikit-learn" in warning_text
def test_infer_requirements_does_not_print_warning_for_recognized_packages():
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=["sklearn"],
), mock.patch(
"mlflow.utils.requirements_utils._PYPI_PACKAGE_INDEX",
_PyPIPackageIndex(date="2022-01-01", package_names={"scikit-learn"}),
), mock.patch(
"mlflow.utils.requirements_utils._logger.warning"
) as mock_warning:
_infer_requirements("path/to/model", "sklearn")
mock_warning.assert_not_called()
def test_capture_imported_modules_scopes_databricks_imports(monkeypatch, tmpdir):
from mlflow.utils._capture_modules import _CaptureImportedModules
monkeypatch.chdir(tmpdir)
monkeypatch.syspath_prepend(str(tmpdir))
databricks_dir = os.path.join(tmpdir, "databricks")
os.makedirs(databricks_dir)
for file_name in [
"__init__.py",
"automl.py",
"automl_runtime.py",
"automl_foo.py",
"model_monitoring.py",
"other.py",
]:
with open(os.path.join(databricks_dir, file_name), "w"):
pass
with _CaptureImportedModules() as cap:
# pylint: disable=unused-import
import databricks
import databricks.automl
import databricks.automl_foo
import databricks.automl_runtime
import databricks.model_monitoring
assert "databricks.automl" in cap.imported_modules
assert "databricks.model_monitoring" in cap.imported_modules
assert "databricks" not in cap.imported_modules
assert "databricks.automl_foo" not in cap.imported_modules
with _CaptureImportedModules() as cap:
# pylint: disable=unused-import
import databricks.automl
import databricks.automl_foo
import databricks.automl_runtime
import databricks.model_monitoring
import databricks.other
assert "databricks.automl" in cap.imported_modules
assert "databricks.model_monitoring" in cap.imported_modules
assert "databricks" in cap.imported_modules
assert "databricks.automl_foo" not in cap.imported_modules
def test_infer_pip_requirements_scopes_databricks_imports():
mlflow.utils.requirements_utils._MODULES_TO_PACKAGES = None
mlflow.utils.requirements_utils._PACKAGES_TO_MODULES = None
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=[
"databricks.automl",
"databricks.model_monitoring",
"databricks.automl_runtime",
],
), mock.patch(
"mlflow.utils.requirements_utils._get_installed_version",
return_value="1.0",
), mock.patch(
"importlib_metadata.packages_distributions",
return_value={
"databricks": ["databricks-automl-runtime", "databricks-model-monitoring", "koalas"],
},
):
assert _infer_requirements("path/to/model", "sklearn") == [
"databricks-automl-runtime==1.0",
"databricks-model-monitoring==1.0",
]
assert mlflow.utils.requirements_utils._MODULES_TO_PACKAGES["databricks"] == ["koalas"]
| apache-2.0 |
jzt5132/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 102 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
    Y[0] = 0.  # make the mean of Y zero
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# rejects all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
luo66/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 102 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
    Y[0] = 0.  # make the mean of Y zero
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
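# Added illustrative sketch (not part of the test suite): a minimal
# Benjamini-Hochberg step-up selection on raw p-values, assuming independent
# tests. It is only meant to make the "FDR = E(FP / (TP + FP)) <= alpha"
# comment above concrete; the helper name is new and unused elsewhere.
def benjamini_hochberg_select(p_values, alpha):
    p_values = np.asarray(p_values)
    n_tests = p_values.size
    order = np.argsort(p_values)
    sorted_p = p_values[order]
    # largest k (1-based) such that p_(k) <= alpha * k / n
    below = sorted_p <= alpha * np.arange(1, n_tests + 1) / float(n_tests)
    selected = np.zeros(n_tests, dtype=bool)
    if below.any():
        last = np.nonzero(below)[0].max()
        selected[order[:last + 1]] = True
    return selected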
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
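# Added illustrative note (not part of the test suite): the stable ordering the
# test above relies on can be reproduced with numpy's mergesort, which keeps the
# original order of tied scores. The helper name is new and unused elsewhere.
def stable_top_k_indices(scores, k):
    order = np.argsort(scores, kind='mergesort')  # stable for tied scores
    return np.sort(order[-k:])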
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
luo66/scikit-learn | benchmarks/bench_plot_ward.py | 288 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
mlflow/mlflow | examples/pyspark_ml_autologging/pipeline.py | 1 | 1185 | from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import VectorAssembler, StandardScaler
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sklearn.datasets import load_iris
import mlflow
spark = SparkSession.builder.getOrCreate()
mlflow.pyspark.ml.autolog()
df = load_iris(as_frame=True).frame.rename(columns={"target": "label"})
df = spark.createDataFrame(df)
train, test = df.randomSplit([0.8, 0.2])
assembler = VectorAssembler(inputCols=df.columns[:-1], outputCol="features")
scaler = StandardScaler(inputCol=assembler.getOutputCol(), outputCol="scaledFeatures")
lor = LogisticRegression(maxIter=5, featuresCol=scaler.getOutputCol())
# Non-nested pipeline
pipeline = Pipeline(stages=[assembler, scaler, lor])
with mlflow.start_run():
pipeline_model = pipeline.fit(train)
columns = ["features", "prediction"]
pipeline_model.transform(test).select(columns).show()
# Nested pipeline
nested_pipeline = Pipeline(stages=[Pipeline(stages=[assembler, scaler]), lor])
with mlflow.start_run():
nested_pipeline_model = nested_pipeline.fit(train)
nested_pipeline_model.transform(test).select(columns).show()
spark.stop()
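# Added illustrative check (not part of the original example): list what
# autologging just recorded. This assumes a reasonably recent mlflow where
# mlflow.search_runs() returns a pandas DataFrame for the active experiment.
runs = mlflow.search_runs()
print("autologged runs:", len(runs))
print("logged columns:", list(runs.columns))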
| apache-2.0 |
sgenoud/scikit-learn | examples/plot_feature_selection.py | 1 | 2443 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non-informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We can
see that they have the highest score with univariate feature selection.
The SVM assigns small weights to these features, but these weights are
non-zero. Applying univariate feature selection before the SVM increases
the SVM weights attributed to the significant features, and will thus
improve classification.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The IRIS dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.normal(size=(len(iris.data), 35))
# Add the noisy data to the informative features
x = np.hstack((iris.data, E))
y = iris.target
###############################################################################
pl.figure(1)
pl.clf()
x_indices = np.arange(x.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(x, y)
scores = -np.log10(selector.scores_)
scores /= scores.max()
pl.bar(x_indices - .45, scores, width=.3,
label=r'Univariate score ($-Log(p_{value})$)',
color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(x, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
pl.bar(x_indices - .15, svm_weights, width=.3, label='SVM weight',
color='r')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.axis('tight')
pl.legend(loc='upper right')
pl.show()
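###############################################################################
# Added illustrative step (not in the original example): chain the univariate
# filter and the SVM, as the docstring above suggests. The names below are new,
# and sklearn.pipeline.Pipeline is assumed to be available in the installed
# scikit-learn version.
from sklearn.pipeline import Pipeline

clf_selected = Pipeline([('anova', SelectPercentile(f_classif, percentile=10)),
                         ('svc', svm.SVC(kernel='linear'))])
clf_selected.fit(x, y)
svm_weights_selected = (clf_selected.named_steps['svc'].coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()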
| bsd-3-clause |
rcln/tag.suggestion | code_python27/toolEval/alchemyAPItagger/alchemyeval_strict.py | 1 | 5202 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 18:40:12 2015
@author: ivan
"""
from __future__ import division
#from sklearn.feature_extraction.text import CountVectorizer
#from sklearn.feature_extraction.text import TfidfTransformer
import argparse
from lxml import etree
import cPickle
import os
import time
import numpy as np
from alchemyapi import AlchemyAPI
import json
def parseBlogFile(myfile):
f=open(myfile)
document = etree.fromstring(f.read())
date=document.xpath("date/text()")
# print date[0]
title=document.xpath("title/text()")
# print title[0]
author=document.xpath("author/text()")
# print author[0]
tags=document.xpath("tags_set/tag/text()")
# for tag in tags:
# print tag.text
cats=document.xpath("categories_set/category/text()")
# for cat in cats:
# print cat.text
text=document.xpath("text/text()")[0]
# print text
return (date, author, tags, cats, text, title)
def exploreDir(targetDir):
f=[]
for (dirpath,dirnames,filenames) in os.walk(targetDir):
f.extend(filenames)
break
return f
def readXMLCorpusFrom(thisDirectory):
corpusFiles=exploreDir(thisDirectory)
corpus=[]
tagw=[]
#indx=0
for doc in corpusFiles:
#print doc
[dat,aut,tag,cat,txt,tit]=parseBlogFile(thisDirectory+doc)
corpus.append(txt)
#print type(tag)
tagw.append(tag)
#if indx==11:
# print tag
#print tit
#indx+=1
return [corpus,tagw]
def loadStopwords(listfile):
lf=open(listfile)
stopwordlst=[]
for word in lf.readlines():
word=word.replace('\n',"")
stopwordlst.append(unicode(word,'utf8'))
#print word
return stopwordlst
def evalAvPrecision(prop,tgs):
n=len(tgs)
precArr=[]
#cycle to go through all instances
for x in range(n):
count=0
#cycle to go through actual tags of instance x
for tg in tgs[x]:
#cycle to go through proposed tags for instance x
for tprop in prop[x]:
#if x==11:
# print tg.lower()+" "+tprop
if tg.lower() == tprop:
#if x==11:
# print tg.lower()+" "+tprop
count+=1
#if x==11:
# print strcount
if len(tgs[x])> 0 :
precArr.append(count/len(prop[x]))
return np.mean(precArr), precArr
def evalAvRecall(prop,tgs):
n=len(tgs)
recArr=[]
#cycle to go through all instances
for x in range(n):
count=0
#cycle to go through actual tags of instance x
for tg in tgs[x]:
#cycle to go through proposed tags for instance x
for tprop in prop[x]:
if tg.lower()==tprop:
count+=1
if len(tgs[x])> 0 :
recArr.append(count/len(tgs[x]))
return np.mean(recArr), recArr
def f1measure(a,b):
if (a+b)>0:
return 2*(a*b)/(a+b)
else:
return 0
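# Added illustrative sanity check (not used by the evaluation itself): with
# precision 0.5 and recall 0.25 the harmonic mean above gives
# F1 = 2 * (0.5 * 0.25) / (0.5 + 0.25) = 1/3.
assert abs(f1measure(0.5, 0.25) - 1.0 / 3.0) < 1e-9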
def mergeList(l1,l2):
n=len(l1)
l3=[]
for x in range(n):
l3.append(list(set(l1[x]+l2[x])))
return l3
parser = argparse.ArgumentParser()
parser.add_argument('corpusloc', help='corpus location')
parser.add_argument('resultfile', help='file storing results')
parser.add_argument('--n', dest="nTOpropose", type=int, default=10, help='how many tags will be proposed, default=10')
parser.add_argument('--ngram', dest="ngram", type=int, default=2, help='max size of the n-grams considered, default=2')
args = parser.parse_args()
if args.corpusloc[-1]!='/':
args.corpusloc+='/'
topn=0-args.nTOpropose
#stpwrds=loadStopwords("stopwords.french.list")
#print len(stpwrds)
start = time.time()
#vectorizer = CountVectorizer(ngram_range=(1,args.ngram),lowercase=True,stop_words=stpwrds)
[txtCorpus,tagsw]=readXMLCorpusFrom(args.corpusloc)
#print tagsw[11]
#print len(txtCorpus)
tgsugtop=[]
alchemyapi = AlchemyAPI()
for docX in txtCorpus:
    #print docX
    tagsug = []
    response = alchemyapi.combined('text', docX)
    if response['status'] == 'OK':
        for keyword in response['keywords']:
            tagsug.append([keyword['text'], keyword['relevance']])
        for entity in response['entities']:
            tagsug.append([entity['text'], entity['relevance']])
    # keep the top-n suggestions for this document, highest relevance first,
    # so the evaluation below receives one list of proposed tags per document
    docsug = []
    for tgsg in sorted(tagsug, key=lambda x: float(x[1]), reverse=True)[:args.nTOpropose]:
        docsug.append(tgsg[0])
        print tgsg[0]
    tgsugtop.append(docsug)
#--------------------------------------
#f=open('../../results/tfidfsuggestion/'+args.resultfile+"_"+str(args.nTOpropose)+".res", 'wb')
f=open("../../../results/tooleval/alchemyAPItagger/"+args.resultfile+"_"+str(args.nTOpropose)+".res", 'wb')
cPickle.dump(tgsugtop, f)
#cPickle.dump(tagsw, f)
f.close()
end = time.time()
print "processing time: "+str(end - start)
start = time.time()
print "Evaluation AlchemyAPI tagger:"
[avprec2,precs2]=evalAvPrecision(tgsugtop,tagsw)
[avrec2,recs2]=evalAvRecall(tgsugtop,tagsw)
print "Precision "+str(avprec2)
print "Recall "+str(avrec2)
print "F1-measure "+str(f1measure(avprec2,avrec2))
end = time.time()
print "evaluation time: "+str(end - start) | gpl-2.0 |
TinghuiWang/pyActLearn | examples/CASAS_Single_Test/b1_lstm_raw.py | 1 | 8331 | import os
import pickle
import logging
import argparse
import numpy as np
import tensorflow as tf
from datetime import datetime
from pyActLearn.CASAS.data import CASASData
from pyActLearn.CASAS.fuel import CASASFuel
from pyActLearn.learning.nn.lstm import LSTM
from pyActLearn.performance.record import LearningResult
from pyActLearn.performance import get_confusion_matrix
logger = logging.getLogger(__file__)
def training_and_test(token, train_data, test_data, num_classes, result, model, log_dir):
"""Train and test
Args:
token (:obj:`str`): token representing this run
train_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of training feature and label
test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
num_classes (:obj:`int`): Number of classes
result (:obj:`pyActLearn.performance.record.LearningResult`): LearningResult object to hold learning result
"""
train_y = np.zeros((train_data[1].shape[0], num_classes))
test_y = np.zeros((test_data[1].shape[0], num_classes))
for i in range(train_data[1].shape[0]):
train_y[i, train_data[1].flatten()[i]] = 1
for i in range(test_data[1].shape[0]):
test_y[i, test_data[1].flatten()[i]] = 1
model.fit(train_data[0], train_y, iter_num=8000, batch_size=100, criterion='monitor_based',
summaries_dir=log_dir, test_x=test_data[0], test_y=test_y,
summary_interval=100)
# Test
predicted_y = model.predict(test_data[0])
predicted_proba = model.predict_proba(test_data[0])
# Evaluate the Test and Store Result
confusion_matrix = get_confusion_matrix(num_classes=num_classes,
label=test_data[1][model.num_steps:].flatten(),
predicted=predicted_y)
variable_file = os.path.join(log_dir, token + '_save.ckpt')
saver.save(model.sess, variable_file)
result.add_record(variable_file, key=token, confusion_matrix=confusion_matrix)
return predicted_y, predicted_proba
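# Added illustrative sketch (not used by the original script): the per-row
# loops in training_and_test above build one-hot label matrices; the helper
# below does the same with vectorized numpy indexing. The name is new and
# nothing else in this file calls it.
def one_hot_labels(labels, num_classes):
    labels = np.asarray(labels).flatten()
    encoded = np.zeros((labels.shape[0], num_classes))
    encoded[np.arange(labels.shape[0]), labels] = 1
    return encoded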
def load_and_test(token, test_data, num_classes, result, model):
"""Load and test
Args:
token (:obj:`str`): token representing this run
test_data (:obj:`tuple` of :obj:`numpy.array`): Tuple of testing feature and label
num_classes (:obj:`int`): Number of classes
result (:obj:`pyActLearn.performance.record.LearningResult`): LearningResult object to hold learning result
"""
saver.restore(model.sess, result.get_record_by_key(token)['model'])
# Test
predicted_y = model.predict(test_data[0])
predicted_proba = model.predict_proba(test_data[0])
return predicted_y, predicted_proba
if __name__ == '__main__':
args_ok = False
parser = argparse.ArgumentParser(description='Run LSTM on single resident CASAS datasets.')
parser.add_argument('-d', '--dataset', help='Directory to original datasets')
parser.add_argument('-o', '--output', help='Output folder')
parser.add_argument('--week', type=int, metavar='N', help='Train on week N-1 and run on week N')
parser.add_argument('--h5py', help='HDF5 dataset folder')
args = parser.parse_args()
# Default parameters
log_filename = os.path.basename(__file__).split('.')[0] + \
'-%s.log' % datetime.now().strftime('%y%m%d_%H:%M:%S')
# Setup output directory
output_dir = args.output
if output_dir is not None:
output_dir = os.path.abspath(os.path.expanduser(output_dir))
if os.path.exists(output_dir):
# Found output_dir, check if it is a directory
if not os.path.isdir(output_dir):
exit('Output directory %s is found, but not a directory. Abort.' % output_dir)
else:
# Create directory
os.makedirs(output_dir)
else:
output_dir = '.'
log_filename = os.path.join(output_dir, log_filename)
# Setup Logging as early as possible
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s] %(name)s:%(levelname)s:%(message)s',
handlers=[logging.FileHandler(log_filename),
logging.StreamHandler()])
# If dataset is specified, update h5py
casas_data_dir = args.dataset
if casas_data_dir is not None:
casas_data_dir = os.path.abspath(os.path.expanduser(casas_data_dir))
if not os.path.isdir(casas_data_dir):
exit('CASAS dataset at %s does not exist. Abort.' % casas_data_dir)
# Find h5py dataset first
h5py_dir = args.h5py
if h5py_dir is not None:
h5py_dir = os.path.abspath(os.path.expanduser(h5py_dir))
else:
# Default location
h5py_dir = os.path.join(output_dir, 'h5py')
if os.path.exists(h5py_dir):
if not os.path.isdir(h5py_dir):
exit('h5py dataset location %s is not a directory. Abort.' % h5py_dir)
if not CASASFuel.files_exist(h5py_dir):
# Finish check and creating all directory needed - now load datasets
if casas_data_dir is not None:
casas_data = CASASData(path=casas_data_dir)
casas_data.summary()
            # This LSTM script uses raw per-sensor features with normalization
casas_data.populate_feature(method='raw', normalized=True, per_sensor=True)
casas_data.export_hdf5(h5py_dir)
casas_fuel = CASASFuel(dir_name=h5py_dir)
# Prepare learning result
result_pkl_file = os.path.join(output_dir, 'result.pkl')
result = None
if os.path.isfile(result_pkl_file):
f = open(result_pkl_file, 'rb')
result = pickle.load(f)
f.close()
if result.data != h5py_dir:
logger.error('Result pickle file found for different dataset %s' % result.data)
exit('Cannot save learning result at %s' % result_pkl_file)
else:
result = LearningResult(name='LSTM', data=h5py_dir, mode='by_week')
num_classes = casas_fuel.get_output_dims()
# Open Fuel and get all splits
split_list = casas_fuel.get_set_list()
# If week is specified
if args.week is not None:
if 0 < args.week < len(split_list):
split_list = [split_list[args.week - 1], split_list[args.week]]
# Start training
train_names = ('week 24', 'week 23', 'week 22', 'week 21')
test_names = ('week 25', 'week 26', 'week 27', 'week 28')
test_name = 'single_test'
train_set = casas_fuel.get_dataset(train_names, load_in_memory=True)
(train_set_data) = train_set.data_sources
test_set = casas_fuel.get_dataset(test_names, load_in_memory=True)
(test_set_data) = test_set.data_sources
# Prepare Back Annotation
fp_back_annotated = open(os.path.join(output_dir, 'back_annotated.txt'), 'w')
fp_back_probability = open(os.path.join(output_dir, 'back_annotated_proba.txt'), 'w')
output_log_dir = os.path.join(output_dir, 'log')
if not os.path.isdir(output_log_dir):
os.makedirs(output_log_dir)
model = LSTM(casas_fuel.get_input_dims(), casas_fuel.get_output_dims(), num_units=200, num_steps=100)
saver = tf.train.Saver(max_to_keep=len(split_list))
session = tf.Session()
model.sess = session
log_dir = output_log_dir
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
    # run the LSTM model
logger.info('Training on %s, Testing on %s' % (str(train_names), str(test_names)))
if result.get_record_by_key(test_name) is None:
prediction, prediction_proba = training_and_test(test_name, train_set_data, test_set_data, num_classes,
result, model=model, log_dir=log_dir)
else:
prediction, prediction_proba = load_and_test(test_name, test_set_data, num_classes, result, model=model)
casas_fuel.back_annotate(fp_back_annotated, prediction=prediction, split_name=test_names)
casas_fuel.back_annotate_with_proba(fp_back_probability, prediction_proba, split_name=test_names)
train_name = test_name
train_set_data = test_set_data
fp_back_annotated.close()
fp_back_probability.close()
f = open(result_pkl_file, 'wb')
pickle.dump(obj=result, file=f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
result.export_to_xlsx(os.path.join(output_dir, 'result.xlsx'))
| bsd-3-clause |
lilleswing/deepchem | contrib/one_shot_models/examples/sider_from_tox21_res_one_fold.py | 8 | 2496 | """
Train low-data res models on Tox21. Test on SIDER. Test last fold only.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
import tensorflow as tf
from datasets import load_sider_convmol
from datasets import load_tox21_convmol
from datasets import to_numpy_dataset
# Number of folds for split
K = 4
# Depth of attention module
max_depth = 3
# num positive/negative ligands
n_pos = 10
n_neg = 10
# Set batch sizes for network
test_batch_size = 128
support_batch_size = n_pos + n_neg
nb_epochs = 1
n_train_trials = 2000
n_eval_trials = 20
learning_rate = 1e-4
log_every_n_samples = 50
# Number of features on conv-mols
n_feat = 75
sider_tasks, sider_dataset, _ = load_sider_convmol()
sider_dataset = to_numpy_dataset(sider_dataset)
tox21_tasks, tox21_dataset, _ = load_tox21_convmol()
tox21_dataset = to_numpy_dataset(tox21_dataset)
# Define metric
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification")
# Train support model on train
support_model = dc.nn.SequentialSupportGraph(n_feat)
# Add layers
support_model.add(dc.nn.GraphConv(64, n_feat, activation='relu'))
support_model.add(dc.nn.GraphPool())
support_model.add(dc.nn.GraphConv(128, 64, activation='relu'))
support_model.add(dc.nn.GraphPool())
support_model.add(dc.nn.GraphConv(64, 128, activation='relu'))
support_model.add(dc.nn.GraphPool())
support_model.add(dc.nn.Dense(128, 64, activation='tanh'))
support_model.add_test(dc.nn.GraphGather(test_batch_size, activation='tanh'))
support_model.add_support(
dc.nn.GraphGather(support_batch_size, activation='tanh'))
# Apply a residual lstm layer
support_model.join(
dc.nn.ResiLSTMEmbedding(test_batch_size, support_batch_size, 128,
max_depth))
model = dc.models.SupportGraphClassifier(
support_model,
test_batch_size=test_batch_size,
support_batch_size=support_batch_size,
learning_rate=learning_rate)
model.fit(
tox21_dataset,
nb_epochs=nb_epochs,
n_episodes_per_epoch=n_train_trials,
n_pos=n_pos,
n_neg=n_neg,
log_every_n_samples=log_every_n_samples)
mean_scores, std_scores = model.evaluate(
sider_dataset, metric, n_pos, n_neg, n_trials=n_eval_trials)
print("Mean Scores on evaluation dataset")
print(mean_scores)
print("Standard Deviations on evaluation dataset")
print(std_scores)
print("Median of Mean Scores")
print(np.median(np.array(mean_scores.values())))
| mit |
tomsilver/nupic | tests/swarming/nupic/swarming/experiments/spatial_classification/description.py | 1 | 15598 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'fields': [],
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalClassification',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'address': {
'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21
},
'_classifierInput': {
'name': u'_classifierInput',
'fieldname': u'consumption',
'classifierOnly': True,
'clipInput': True,
'maxval': 200,
'minval': 0,
'n': 1500,
'type': 'ScalarEncoder',
'w': 21
},
'gym': {
'fieldname': u'gym',
'n': 300,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21
},
'timestamp_dayOfWeek': {
'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'
},
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'
}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': False,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : False,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
'implementation': 'py',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '0',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : { u'info': u'testSpatialClassification',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'consumption', u'predictionSteps': [0]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'window': 1000, 'steps': [0], 'errorMetric': 'avg_err'})
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 |
danielfree/srs | trunk/research/api-server/server.py | 13 | 48099 | #!/usr/bin/python
'''
The MIT License (MIT)
Copyright (c) 2013-2014 winlin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
"""
the api-server is a default demo server for srs to call
when srs gets some event; for example, when a client connects
to srs, srs can invoke the http api of the api-server
"""
import sys
# reload sys model to enable the getdefaultencoding method.
reload(sys)
# set the default encoding to utf-8
# using exec to set the encoding, to avoid error in IDE.
exec("sys.setdefaultencoding('utf-8')")
assert sys.getdefaultencoding().lower() == "utf-8"
import os, json, time, datetime, cherrypy, threading
# simple log functions.
def trace(msg):
date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print "[%s][trace] %s"%(date, msg)
# enable crossdomain access for js-client
# define the following method:
# def OPTIONS(self, *args, **kwargs)
# enable_crossdomain()
# invoke this method to enable js to request crossdomain.
def enable_crossdomain():
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
cherrypy.response.headers["Access-Control-Allow-Methods"] = "GET, POST, HEAD, PUT, DELETE"
# generate allow headers for crossdomain.
allow_headers = ["Cache-Control", "X-Proxy-Authorization", "X-Requested-With", "Content-Type"]
cherrypy.response.headers["Access-Control-Allow-Headers"] = ",".join(allow_headers)
# error codes definition
class Error:
# ok, success, completed.
success = 0
# error when parse json
system_parse_json = 100
# request action invalid
request_invalid_action = 200
# cdn node not exists
cdn_node_not_exists = 201
'''
handle the clients requests: connect/disconnect vhost/app.
'''
class RESTClients(object):
exposed = True
def GET(self):
enable_crossdomain()
clients = {}
return json.dumps(clients)
'''
for SRS hook: on_connect/on_close
on_connect:
        when a client connects to vhost/app, this hook is called;
        the request in the POST data string is an object encoded as json:
{
"action": "on_connect",
"client_id": 1985,
"ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
"pageUrl": "http://www.test.com/live.html"
}
on_close:
when client close/disconnect to vhost/app/stream, call the hook,
the request in the POST data string is a object encode by json:
{
"action": "on_close",
"client_id": 1985,
"ip": "192.168.1.10", "vhost": "video.test.com", "app": "live"
}
    if valid, the hook must return HTTP code 200 (Status OK) and respond with
    an int value specifying the error code (0 corresponding to success):
0
'''
def POST(self):
enable_crossdomain()
# return the error code in str
code = Error.success
req = cherrypy.request.body.read()
trace("post to clients, req=%s"%(req))
try:
json_req = json.loads(req)
except Exception, ex:
code = Error.system_parse_json
trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
return str(code)
action = json_req["action"]
if action == "on_connect":
code = self.__on_connect(json_req)
elif action == "on_close":
code = self.__on_close(json_req)
else:
trace("invalid request action: %s"%(json_req["action"]))
code = Error.request_invalid_action
return str(code)
def OPTIONS(self, *args, **kwargs):
enable_crossdomain()
def __on_connect(self, req):
code = Error.success
trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, tcUrl=%s, pageUrl=%s"%(
req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["tcUrl"], req["pageUrl"]
))
# TODO: process the on_connect event
return code
def __on_close(self, req):
code = Error.success
trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s"%(
req["action"], req["client_id"], req["ip"], req["vhost"], req["app"]
))
# TODO: process the on_close event
return code
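# Added illustrative helper (not used by the server): shows the JSON body that
# an SRS on_connect callback POSTs to the clients handler above. The url and
# field values below are placeholders for demonstration only.
def example_on_connect_request(api_url="http://127.0.0.1:8085/api/v1/clients"):
    import urllib2
    req = {
        "action": "on_connect",
        "client_id": 1985,
        "ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
        "tcUrl": "rtmp://video.test.com/live",
        "pageUrl": "http://www.test.com/live.html"
    }
    return urllib2.urlopen(api_url, json.dumps(req)).read()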
'''
handle the streams requests: publish/unpublish stream.
'''
class RESTStreams(object):
exposed = True
def GET(self):
enable_crossdomain()
streams = {}
return json.dumps(streams)
'''
for SRS hook: on_publish/on_unpublish
on_publish:
        when a client (encoder) publishes to vhost/app/stream, this hook is called;
        the request in the POST data string is an object encoded as json:
{
"action": "on_publish",
"client_id": 1985,
"ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
"stream": "livestream"
}
on_unpublish:
when client(encoder) stop publish to vhost/app/stream, call the hook,
the request in the POST data string is a object encode by json:
{
"action": "on_unpublish",
"client_id": 1985,
"ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
"stream": "livestream"
}
    if valid, the hook must return HTTP code 200 (Status OK) and respond with
    an int value specifying the error code (0 corresponding to success):
0
'''
def POST(self):
enable_crossdomain()
# return the error code in str
code = Error.success
req = cherrypy.request.body.read()
trace("post to streams, req=%s"%(req))
try:
json_req = json.loads(req)
except Exception, ex:
code = Error.system_parse_json
trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
return str(code)
action = json_req["action"]
if action == "on_publish":
code = self.__on_publish(json_req)
elif action == "on_unpublish":
code = self.__on_unpublish(json_req)
else:
trace("invalid request action: %s"%(json_req["action"]))
code = Error.request_invalid_action
return str(code)
def OPTIONS(self, *args, **kwargs):
enable_crossdomain()
def __on_publish(self, req):
code = Error.success
trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
))
# TODO: process the on_publish event
return code
def __on_unpublish(self, req):
code = Error.success
trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
))
# TODO: process the on_unpublish event
return code
'''
handle the sessions requests: client play/stop stream
'''
class RESTSessions(object):
exposed = True
def GET(self):
enable_crossdomain()
sessions = {}
return json.dumps(sessions)
'''
for SRS hook: on_play/on_stop
on_play:
        when a client starts playing vhost/app/stream, this hook is called;
        the request in the POST data string is an object encoded as json:
{
"action": "on_play",
"client_id": 1985,
"ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
"stream": "livestream"
}
on_stop:
when client(encoder) stop publish to vhost/app/stream, call the hook,
the request in the POST data string is a object encode by json:
{
"action": "on_stop",
"client_id": 1985,
"ip": "192.168.1.10", "vhost": "video.test.com", "app": "live",
"stream": "livestream"
}
    if valid, the hook must return HTTP code 200 (Status OK) and respond with
    an int value specifying the error code (0 corresponding to success):
0
'''
def POST(self):
enable_crossdomain()
# return the error code in str
code = Error.success
req = cherrypy.request.body.read()
trace("post to sessions, req=%s"%(req))
try:
json_req = json.loads(req)
except Exception, ex:
code = Error.system_parse_json
trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
return str(code)
action = json_req["action"]
if action == "on_play":
code = self.__on_play(json_req)
elif action == "on_stop":
code = self.__on_stop(json_req)
else:
trace("invalid request action: %s"%(json_req["action"]))
code = Error.request_invalid_action
return str(code)
def OPTIONS(self, *args, **kwargs):
enable_crossdomain()
def __on_play(self, req):
code = Error.success
trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
))
# TODO: process the on_play event
return code
def __on_stop(self, req):
code = Error.success
trace("srs %s: client id=%s, ip=%s, vhost=%s, app=%s, stream=%s"%(
req["action"], req["client_id"], req["ip"], req["vhost"], req["app"], req["stream"]
))
# TODO: process the on_stop event
return code
global_arm_server_id = os.getpid();
class ArmServer:
def __init__(self):
global global_arm_server_id
global_arm_server_id += 1
self.id = str(global_arm_server_id)
self.ip = None
self.device_id = None
self.public_ip = cherrypy.request.remote.ip
self.heartbeat = time.time()
self.clients = 0
def dead(self):
dead_time_seconds = 20
if time.time() - self.heartbeat > dead_time_seconds:
return True
return False
def json_dump(self):
data = {}
data["id"] = self.id
data["ip"] = self.ip
data["device_id"] = self.device_id
data["public_ip"] = self.public_ip
data["heartbeat"] = self.heartbeat
data["heartbeat_h"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(self.heartbeat))
data["summaries"] = "http://%s:1985/api/v1/summaries"%(self.ip)
return data
'''
the server list
'''
class RESTServers(object):
exposed = True
def __init__(self):
self.__nodes = []
self.__last_update = datetime.datetime.now();
self.__lock = threading.Lock()
def __get_node(self, device_id):
for node in self.__nodes:
if node.device_id == device_id:
return node
return None
def __refresh_nodes(self):
while len(self.__nodes) > 0:
has_dead_node = False
for node in self.__nodes:
if node.dead():
self.__nodes.remove(node)
has_dead_node = True
if not has_dead_node:
break
def __json_dump_nodes(self, peers):
data = []
for node in peers:
data.append(node.json_dump())
return data
def __get_peers_for_play(self, device_id):
peers = []
for node in self.__nodes:
if node.device_id == device_id:
peers.append(node)
return peers
def __select_peer(self, peers, device_id):
target = None
for peer in peers:
if target is None or target.clients > peer.clients:
target = peer
if target is None:
return None
target.clients += 1
return target.ip
'''
post to update server ip.
request body: the new raspberry-pi server ip. TODO: FIXME: more info.
'''
def POST(self):
enable_crossdomain()
try:
self.__lock.acquire()
req = cherrypy.request.body.read()
trace("post to nodes, req=%s"%(req))
try:
json_req = json.loads(req)
except Exception, ex:
code = Error.system_parse_json
trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
return json.dumps({"code":code, "data": None})
device_id = json_req["device_id"]
node = self.__get_node(device_id)
if node is None:
node = ArmServer()
self.__nodes.append(node)
node.ip = json_req["ip"]
node.device_id = device_id
node.public_ip = cherrypy.request.remote.ip
node.heartbeat = time.time()
return json.dumps({"code":Error.success, "data": {"id":node.id}})
finally:
self.__lock.release()
'''
    id can be:
pi: the pi demo, raspberry-pi default demo.
device_id: the id of device to get.
            action: can be play or mgmt; play plays the ingest stream, mgmt gets api/v1/versions.
stream: the stream to play, for example, live/livestream for http://server:8080/live/livestream.html
meeting: the meeting demo. jump to web meeting if index is None.
device_id: the id of device to get.
            local: whether to view the local raspberry-pi stream. if "true", redirect to the local (internal) api server.
            index: the meeting stream index; dynamically gets the stream from root.api.v1.chats.get_url_by_index(index)
gslb: the gslb to get edge ip
device_id: the id of device to get.
ingest: deprecated, alias for pi.
'''
def GET(self, id=None, action="play", stream="live/livestream", index=None, local="false", device_id=None):
enable_crossdomain()
try:
self.__lock.acquire()
self.__refresh_nodes()
data = self.__json_dump_nodes(self.__nodes)
server_ip = "demo.chnvideo.com"
ip = cherrypy.request.remote.ip
            if device_id is not None:
peers = self.__get_peers_for_play(device_id)
if len(peers) > 0:
server_ip = self.__select_peer(peers, device_id)
# demo, srs meeting urls.
if id == "meeting":
if index is None:
url = "http://%s:8085"%(server_ip)
elif local == "true":
url = "http://%s:8085/api/v1/servers?id=%s&index=%s&local=false"%(server_ip, id, index)
else:
rtmp_url = root.api.v1.chats.get_url_by_index(index)
if rtmp_url is None:
return "meeting stream not found"
urls = rtmp_url.replace("...vhost...", "?vhost=").replace("rtmp://", "").split("/")
hls_url = "http://%s:8080/%s/%s.m3u8"%(urls[0].strip(":19350").strip(":1935"), urls[1].split("?")[0], urls[2])
return self.__generate_hls(hls_url)
# raspberry-pi urls.
elif id == "ingest" or id == "pi":
if action == "play":
url = "http://%s:8080/%s.html"%(server_ip, stream)
elif action == "rtmp":
url = "../../players/srs_player.html?server=%s&vhost=%s&app=%s&stream=%s&autostart=true"%(server_ip, server_ip, stream.split("/")[0], stream.split("/")[1])
elif action == "hls":
hls_url = "http://%s:8080/%s.m3u8"%(server_ip, stream);
if stream.startswith("http://"):
hls_url = stream;
return self.__generate_hls(hls_url.replace(".m3u8.m3u8", ".m3u8"))
else:
url = "http://%s:8080/api/v1/versions"%(server_ip)
elif id == "gslb":
return json.dumps({"code":Error.success, "data": {
"edge":server_ip, "client":ip,
"peers":self.__json_dump_nodes(peers),
"streams": {
"pi": {
"livestream": {
"sales-pi-hls": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-arm&stream=live/livestream",
"dev-pi-hls": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-dev-arm&stream=live/livestream"
},
"cztv": {
"sales-pi-hls": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-arm&stream=live/rtmp_cztv01-sd",
"dev-pi-hls": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-dev-arm&stream=live/rtmp_cztv01-sd"
}
},
"hiwifi": {
"hls": {
"dev-livestream": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-dev-hiwifi&stream=live/livestream",
"sales-livestream": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/livestream"
},
"rtmp":{
"dev-livestream": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-dev-hiwifi&stream=live/livestream",
"sales-livestream": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/livestream"
},
"meiyi": {
"rtmp": {
"avatar": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/avatar",
"MenInBlack3": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/MenInBlack3",
"skyfall": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/skyfall",
"SpiderMan": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/SpiderMan",
"thehobbit": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/thehobbit",
"thorthedarkworld": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/thorthedarkworld",
"transformers": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-sales-hiwifi&stream=live/transformers"
},
"hls": {
"avatar": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/avatar",
"MenInBlack3": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/MenInBlack3",
"skyfall": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/skyfall",
"SpiderMan": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/SpiderMan",
"thehobbit": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/thehobbit",
"thorthedarkworld": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/thorthedarkworld",
"transformers": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-sales-hiwifi&stream=live/transformers"
}
}
},
"cubieboard": {
"meiyi": {
"rtmp": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/livestream",
"stream1": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/stream1",
"stream2": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/stream2",
"stream3": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/stream3",
"stream4": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/stream4",
"stream5": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/stream5",
"stream6": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/stream6",
"stream7": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard&stream=live/stream7"
},
"hls": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/livestream",
"stream1": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/stream1",
"stream2": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/stream2",
"stream3": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/stream3",
"stream4": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/stream4",
"stream5": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/stream5",
"stream6": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/stream6",
"stream7": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard&stream=live/stream7"
}
},
"meiyi-house": {
"rtmp": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-house&stream=live/livestream"
},
"hls": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-house&stream=live/livestream"
}
},
"meiyi-bk": {
"rtmp": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/livestream",
"stream1": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream1",
"stream2": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream2",
"stream3": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream3",
"stream4": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream4",
"stream5": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream5",
"stream6": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream6",
"stream7": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream7"
},
"hls": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/livestream",
"stream1": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream1",
"stream2": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream2",
"stream3": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream3",
"stream4": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream4",
"stream5": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream5",
"stream6": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream6",
"stream7": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-bk&stream=live/stream7"
}
},
"meiyi-dev1": {
"rtmp": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-dev1&stream=live/livestream"
},
"hls": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-dev1&stream=live/livestream"
}
},
"meiyi-dev2": {
"rtmp": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=rtmp&device_id=chnvideo-meiyi-cubieboard-dev2&stream=live/livestream"
},
"hls": {
"livesteam": "http://demo.chnvideo.com:8085/api/v1/servers?id=ingest&action=hls&device_id=chnvideo-meiyi-cubieboard-dev2&stream=live/livestream"
}
}
}
}
}})
# others, default.
else:
return json.dumps(data)
#return "id=%s, action=%s, stream=%s, url=%s, index=%s, local=%s"%(id, action, stream, url, index, local)
raise cherrypy.HTTPRedirect(url)
finally:
self.__lock.release()
def DELETE(self, id):
enable_crossdomain()
raise cherrypy.HTTPError(405, "Not allowed.")
def PUT(self, id):
enable_crossdomain()
raise cherrypy.HTTPError(405, "Not allowed.")
def OPTIONS(self, *args, **kwargs):
enable_crossdomain()
def __generate_hls(self, hls_url):
return SrsUtility().hls_html(hls_url)
class SrsUtility:
def hls_html(self, hls_url):
return """
<h1>%s</h1>
<video width="640" height="360"
autoplay controls autobuffer
src="%s"
type="application/vnd.apple.mpegurl">
</video>"""%(hls_url, hls_url);
global_cdn_id = os.getpid();
class CdnNode:
def __init__(self):
global global_cdn_id
global_cdn_id += 1
self.id = str(global_cdn_id)
self.ip = None
self.origin = None
self.os = None
self.srs_status = None
self.public_ip = cherrypy.request.remote.ip
self.heartbeat = time.time()
self.clients = 0
def dead(self):
dead_time_seconds = 10
if time.time() - self.heartbeat > dead_time_seconds:
return True
return False
def json_dump(self):
data = {}
data["id"] = self.id
data["ip"] = self.ip
data["origin"] = self.origin
data["os"] = self.os
data["srs_status"] = self.srs_status
data["public_ip"] = self.public_ip
data["heartbeat"] = self.heartbeat
data["heartbeat_h"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(self.heartbeat))
data["clients"] = self.clients
data["summaries"] = "http://%s:1985/api/v1/summaries"%(self.ip)
return data
'''
the cdn nodes list
'''
class RESTNodes(object):
exposed = True
def __init__(self):
self.__nodes = []
        # @remark, if there is shared data, such as self.__nodes, we must
        # use a lock for cherrypy, otherwise cherrypy's cpu usage will be high
        # and performance will suffer.
self.__lock = threading.Lock()
def __get_node(self, id):
for node in self.__nodes:
if node.id == id:
return node
return None
def __refresh_nodes(self):
while len(self.__nodes) > 0:
has_dead_node = False
for node in self.__nodes:
if node.dead():
self.__nodes.remove(node)
has_dead_node = True
if not has_dead_node:
break
def __get_peers(self, target_node):
peers = []
for node in self.__nodes:
if str(node.id).strip() == str(target_node.id).strip():
continue
if node.public_ip == target_node.public_ip and node.srs_status == "running" and node.origin != target_node.ip:
peers.append(node)
return peers
def __get_peers_for_play(self, ip):
peers = []
for node in self.__nodes:
if node.public_ip == ip and node.srs_status == "running":
peers.append(node)
return peers
def __json_dump_nodes(self, peers):
data = []
for node in peers:
data.append(node.json_dump())
return data
def __select_peer(self, peers, ip):
target = None
for peer in peers:
if target is None or target.clients > peer.clients:
target = peer
if target is None:
return None
target.clients += 1
return target.ip
def GET(self, type=None, format=None, origin=None, vhost=None, port=None, stream=None, node_id=None):
enable_crossdomain()
try:
self.__lock.acquire()
self.__refresh_nodes()
data = self.__json_dump_nodes(self.__nodes)
ip = cherrypy.request.remote.ip
if type is not None:
server = origin
peers = self.__get_peers_for_play(ip)
if len(peers) > 0:
server = self.__select_peer(peers, ip)
if type == "hls":
hls_url = "http://%s:%s/%s.m3u8"%(server, port, stream)
hls_url = hls_url.replace(".m3u8.m3u8", ".m3u8")
if format == "html":
return SrsUtility().hls_html(hls_url)
else:
#return hls_url
raise cherrypy.HTTPRedirect(hls_url)
elif type == "rtmp":
rtmp_url = "rtmp://%s:%s/%s?vhost=%s/%s"%(server, port, stream.split("/")[0], vhost, stream.split("/")[1])
if format == "html":
html = "%s?server=%s&port=%s&vhost=%s&app=%s&stream=%s&autostart=true"%(
"http://demo.chnvideo.com:8085/srs/trunk/research/players/srs_player.html",
server, port, vhost, stream.split("/")[0], stream.split("/")[1])
#return html
raise cherrypy.HTTPRedirect(html)
return rtmp_url
elif type == "gslb":
return json.dumps({"code":Error.success, "data": {
"edge":server, "client":ip,
"peers":self.__json_dump_nodes(peers),
"streams": {
"cztv": {
"hls": "http://demo.chnvideo.com:8085/api/v1/nodes?type=hls&format=html&origin=demo.chnvideo.com&port=8080&stream=live/rtmp_cztv01-sd",
"rtmp": "http://demo.chnvideo.com:8085/api/v1/nodes?type=rtmp&format=html&origin=demo.chnvideo.com&vhost=android&port=1935&stream=live/rtmp_cztv01-sd"
},
"livestream": {
"hls": "http://demo.chnvideo.com:8085/api/v1/nodes?type=hls&format=html&origin=demo.chnvideo.com&port=8080&stream=live/livestream",
"rtmp": "http://demo.chnvideo.com:8085/api/v1/nodes?type=rtmp&format=html&origin=demo.chnvideo.com&vhost=demo.srs.com&port=1935&stream=live/livestream"
},
"apk": "http://demo.chnvideo.com/android.srs.apk"
}
}})
return json.dumps({"code":Error.success, "data": data})
finally:
self.__lock.release()
def PUT(self):
enable_crossdomain()
try:
self.__lock.acquire()
req = cherrypy.request.body.read()
trace("put to nodes, req=%s"%(req))
try:
json_req = json.loads(req)
except Exception, ex:
code = Error.system_parse_json
trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
return json.dumps({"code":code, "data": None})
id = str(json_req["id"])
node = self.__get_node(id)
if node is None:
code = Error.cdn_node_not_exists
trace("cdn node not exists, req=%s, id=%s, code=%s"%(req, id, code))
return json.dumps({"code":code, "data": None})
node.heartbeat = time.time()
node.srs_status = str(json_req["srs_status"])
node.ip = str(json_req["ip"])
if "origin" in json_req:
node.origin = str(json_req["origin"]);
node.public_ip = cherrypy.request.remote.ip
# reset if restart.
if node.srs_status != "running":
node.clients = 0
self.__refresh_nodes()
peers = self.__get_peers(node)
peers_data = self.__json_dump_nodes(peers)
res = json.dumps({"code":Error.success, "data": {"id":node.id, "peers":peers_data}})
trace(res)
return res
finally:
self.__lock.release()
def POST(self):
enable_crossdomain()
try:
self.__lock.acquire()
req = cherrypy.request.body.read()
trace("post to nodes, req=%s"%(req))
try:
json_req = json.loads(req)
except Exception, ex:
code = Error.system_parse_json
trace("parse the request to json failed, req=%s, ex=%s, code=%s"%(req, ex, code))
return json.dumps({"code":code, "data": None})
node = CdnNode()
node.ip = str(json_req["ip"]);
node.os = str(json_req["os"]);
if "origin" in json_req:
node.origin = str(json_req["origin"]);
node.srs_status = str(json_req["srs_status"])
self.__nodes.append(node)
self.__refresh_nodes()
peers = self.__get_peers(node)
peers_data = self.__json_dump_nodes(peers)
res = json.dumps({"code":Error.success, "data": {"id":node.id, "peers":peers_data}})
trace(res)
return res
finally:
self.__lock.release()
def OPTIONS(self, *args, **kwargs):
enable_crossdomain()
global_chat_id = os.getpid();
'''
the chat streams, public chat room.
'''
class RESTChats(object):
exposed = True
global_id = 100
def __init__(self):
# object fields:
# id: an int value indicates the id of user.
# username: a str indicates the user name.
# url: a str indicates the url of user stream.
# agent: a str indicates the agent of user.
# join_date: a number indicates the join timestamp in seconds.
        # join_date_str: a str specifies the formatted friendly time.
        # heartbeat: a number indicates the heartbeat timestamp in seconds.
# vcodec: a dict indicates the video codec info.
# acodec: a dict indicates the audio codec info.
self.__chats = [];
self.__chat_lock = threading.Lock();
# dead time in seconds, if exceed, remove the chat.
self.__dead_time = 15;
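        # Example chat object (illustrative values only), following the fields above:
        #   {"id": 101, "username": "winlin", "url": "rtmp://server:1935/live/demo",
        #    "agent": "srs-player", "join_date": 1400000000.0,
        #    "join_date_str": "2014-05-13 12:00:00", "heartbeat": 1400000000.0,
        #    "vcodec": {}, "acodec": {}}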
'''
get the rtmp url of chat object. None if overflow.
'''
def get_url_by_index(self, index):
        if index is None:
            return None;
        index = int(index)
        if index >= len(self.__chats):
            return None;
return self.__chats[index]["url"];
def GET(self):
enable_crossdomain()
try:
self.__chat_lock.acquire();
chats = [];
copy = self.__chats[:];
for chat in copy:
if time.time() - chat["heartbeat"] > self.__dead_time:
self.__chats.remove(chat);
continue;
chats.append({
"id": chat["id"],
"username": chat["username"],
"url": chat["url"],
"join_date_str": chat["join_date_str"],
"heartbeat": chat["heartbeat"],
});
finally:
self.__chat_lock.release();
return json.dumps({"code":0, "data": {"now": time.time(), "chats": chats}})
def POST(self):
enable_crossdomain()
req = cherrypy.request.body.read()
chat = json.loads(req)
global global_chat_id;
chat["id"] = global_chat_id
global_chat_id += 1
chat["join_date"] = time.time();
chat["heartbeat"] = time.time();
chat["join_date_str"] = time.strftime("%Y-%m-%d %H:%M:%S");
try:
self.__chat_lock.acquire();
self.__chats.append(chat)
finally:
self.__chat_lock.release();
trace("create chat success, id=%s"%(chat["id"]))
return json.dumps({"code":0, "data": chat["id"]})
def DELETE(self, id):
enable_crossdomain()
try:
self.__chat_lock.acquire();
for chat in self.__chats:
if str(id) != str(chat["id"]):
continue
self.__chats.remove(chat)
trace("delete chat success, id=%s"%(id))
return json.dumps({"code":0, "data": None})
finally:
self.__chat_lock.release();
raise cherrypy.HTTPError(405, "Not allowed.")
def PUT(self, id):
enable_crossdomain()
try:
self.__chat_lock.acquire();
for chat in self.__chats:
if str(id) != str(chat["id"]):
continue
chat["heartbeat"] = time.time();
trace("heartbeat chat success, id=%s"%(id))
return json.dumps({"code":0, "data": None})
finally:
self.__chat_lock.release();
raise cherrypy.HTTPError(405, "Not allowed.")
def OPTIONS(self, *args, **kwargs):
enable_crossdomain()
# HTTP RESTful path.
class Root(object):
exposed = True
def __init__(self):
self.api = Api()
def GET(self):
enable_crossdomain();
return json.dumps({"code":Error.success, "urls":{"api":"the api root"}})
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
# HTTP RESTful path.
class Api(object):
exposed = True
def __init__(self):
self.v1 = V1()
def GET(self):
enable_crossdomain();
return json.dumps({"code":Error.success,
"urls": {
"v1": "the api version 1.0"
}
});
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
# HTTP RESTful path. to access as:
# http://127.0.0.1:8085/api/v1/clients
class V1(object):
exposed = True
def __init__(self):
self.clients = RESTClients()
self.streams = RESTStreams()
self.sessions = RESTSessions()
self.chats = RESTChats()
self.servers = RESTServers()
self.nodes = RESTNodes()
def GET(self):
enable_crossdomain();
return json.dumps({"code":Error.success, "urls":{
"clients": "for srs http callback, to handle the clients requests: connect/disconnect vhost/app.",
"streams": "for srs http callback, to handle the streams requests: publish/unpublish stream.",
"sessions": "for srs http callback, to handle the sessions requests: client play/stop stream",
"chats": "for srs demo meeting, the chat streams, public chat room.",
"nodes": {
"summary": "for srs cdn node",
"POST ip=node_ip&os=node_os": "register a new node",
"GET": "get the active edge nodes",
"GET type=gslb&origin=demo.chnvideo.com": "get the gslb edge ip",
"GET type=hls&format=html&origin=demo.chnvideo.com&port=8080&stream=live/livestream": "get the play url, html for hls",
"GET type=rtmp&format=html&origin=demo.chnvideo.com&vhost=demo.srs.com&port=1935&stream=live/livestream": "get the play url, for rtmp"
},
"servers": {
"summary": "for srs raspberry-pi and meeting demo",
"GET": "get the current raspberry-pi servers info",
"GET id=gslb&device_id=chnvideo-sales-arm": "get the gslb edge ip",
"POST ip=node_ip&device_id=device_id": "the new raspberry-pi server info.",
"GET id=ingest&action=play&stream=live/livestream": "play the ingest HLS stream on raspberry-pi",
"GET id=ingest&action=rtmp&stream=live/livestream": "play the ingest RTMP stream on raspberry-pi",
"GET id=ingest&action=hls&stream=live/livestream": "play the ingest HLS stream on raspberry-pi",
"GET id=ingest&action=mgmt": "open the HTTP api url of raspberry-pi",
"GET id=meeting": "redirect to local raspberry-pi meeting url(local ignored)",
"GET id=meeting&local=false&index=0": "play the first(index=0) meeting HLS stream on demo.chnvideo.com(not local)",
"GET id=meeting&local=true&index=0": "play the first(index=0) meeting HLS stream on local server(local x86/x64 server), warn: raspberry-pi donot support HLS meeting."
}
}});
def OPTIONS(self, *args, **kwargs):
enable_crossdomain();
'''
main code start.
'''
# do not support using this module as a library.
if __name__ != "__main__":
raise Exception("embed not support")
# check the user options
if len(sys.argv) <= 1:
print "SRS api callback server, Copyright (c) 2013-2014 winlin"
print "Usage: python %s <port>"%(sys.argv[0])
print " port: the port to listen at."
print "For example:"
print " python %s 8085"%(sys.argv[0])
print ""
print "See also: https://github.com/simple-rtmp-server/srs"
sys.exit(1)
# parse port from user options.
port = int(sys.argv[1])
static_dir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "static-dir"))
trace("api server listen at port: %s, static_dir: %s"%(port, static_dir))
# cherrypy config.
conf = {
'global': {
'server.shutdown_timeout': 1,
'server.socket_host': '0.0.0.0',
'server.socket_port': port,
'tools.encode.on': True,
'tools.staticdir.on': True,
'tools.encode.encoding': "utf-8",
#'server.thread_pool': 2, # single thread server.
},
'/': {
'tools.staticdir.dir': static_dir,
'tools.staticdir.index': "index.html",
# for cherrypy RESTful api support
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
}
}
# start cherrypy web engine
trace("start cherrypy server")
root = Root()
cherrypy.quickstart(root, '/', conf)
| mit |
jzt5132/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 250 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 126 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
tomsilver/nupic | nupic/datafiles/extra/regression/makeDataset.py | 9 | 5327 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file import File
###########################################################################
def scaleData(data, newScale=[0,100]):
minVals = data.min(axis=0)
maxVals = data.max(axis=0)
data = (data-minVals)*(newScale[1]-newScale[0])/(maxVals-minVals) + newScale[0]
return data
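# Example (hypothetical input): scaleData(numpy.array([[1.0], [2.0], [3.0]])) maps
# the column onto the default [0, 100] range, giving [[0.], [50.], [100.]].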
###########################################################################
def generatePolyData(numDataPoints=100,
coefficients=[1, 0],
noiseLevel = 0.1,
dataScale = [0,100],):
xvals = numpy.random.random(numDataPoints)
yvals = numpy.polyval(coefficients, xvals) + \
noiseLevel * numpy.random.randn(numDataPoints)
data = numpy.vstack((yvals, xvals)).transpose()
scaledData = scaleData(data, newScale=dataScale)
return scaledData
###########################################################################
def generateLinearData(numDataPoints=100,
coefficients=[1, 1],
noiseLevel = 0.1,
dataScale = [0,100],):
xvals = numpy.random.random((numDataPoints, len(coefficients)))
yvals = (xvals * coefficients).sum(axis=1) + \
noiseLevel * numpy.random.randn(numDataPoints)
data = numpy.hstack((yvals.reshape(-1,1), xvals))
scaledData = scaleData(data, newScale=dataScale)
return scaledData
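# Example (hypothetical call): generateLinearData(numDataPoints=5, coefficients=[2, -1])
# returns a 5x3 array whose first column is the (rescaled) target and whose remaining
# two columns are the random inputs.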
###########################################################################
def _generateLinearModel(numTrainingRecords, numTestingRecords,
coefficients=[1], noiseLevel=0.1, dataScale=[0,100]):
"""
"""
data = generateLinearData(numDataPoints=numTrainingRecords+numTestingRecords,
coefficients=coefficients,
noiseLevel=noiseLevel,
dataScale=dataScale,)
trainData = data[:numTrainingRecords]
testData = data[numTrainingRecords:]
return trainData, testData
###########################################################################
def _generateFile(filename, data):
"""
Parameters:
----------------------------------------------------------------
  filename: name of .csv file to generate
  data:     a 2D numpy array with one row per record to write
"""
# Create the file
print "Creating %s..." % (filename)
numRecords, numFields = data.shape
fields = [('field%d'%(i+1), 'float', '') for i in range(numFields)]
outFile = File(filename, fields)
for i in xrange(numRecords):
outFile.write(data[i].tolist())
outFile.close()
########################################################################
def generate(model, filenameTrain, filenameTest,
numTrainingRecords=10000, numTestingRecords=1000,):
"""
"""
numpy.random.seed(41)
# ====================================================================
# Generate the model
if model == 'linear0':
trainData, testData = _generateLinearModel(numTrainingRecords,
numTestingRecords,
coefficients=[1],
noiseLevel=0.1)
#import pylab
#pylab.figure()
#pylab.plot(trainData[:,1], trainData[:,0], 'b.')
##pylab.figure()
#pylab.plot(testData[:,1], testData[:,0],'g.')
#pylab.show()
elif model == 'linear1':
trainData, testData = _generateLinearModel(numTrainingRecords,
numTestingRecords,
coefficients=[1,1],
noiseLevel=0.1)
elif model == 'linear2':
trainData, testData = _generateLinearModel(numTrainingRecords,
numTestingRecords,
coefficients=[1,-3])
else:
raise RuntimeError("Unsupported model")
# ====================================================================
# Generate the training and testing files
_generateFile(filename=filenameTrain, data=trainData,)
_generateFile(filename=filenameTest, data=testData,)
| gpl-3.0 |
mlperf/training_results_v0.5 | v0.5.0/nvidia/submission/code/single_stage_detector/pytorch/ssd300.py | 1 | 7384 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from base_model import L2Norm, ResNet18, ResNet34, ResNet50
from mlperf_compliance import mlperf_log
from mlperf_logger import ssd_print
from nhwc import resnet_nhwc
from nhwc.conv import Conv2d_NHWC
class SSD300(nn.Module):
"""
    Build an SSD module that takes a 300x300 image input
    and outputs 8732 per-class bounding boxes.
    backbone: name of the pretrained backbone ('resnet18', 'resnet34' or 'resnet50')
label_num: number of classes (including background 0)
"""
def __init__(self, label_num, backbone='resnet34', use_nhwc=False, pad_input=False):
super(SSD300, self).__init__()
self.label_num = label_num
self.use_nhwc = use_nhwc
self.pad_input = pad_input
if backbone == 'resnet18':
self.model = ResNet18(self.use_nhwc, self.pad_input)
out_channels = 256
out_size = 38
self.out_chan = [out_channels, 512, 512, 256, 256, 128]
elif backbone == 'resnet34':
self.model = ResNet34(self.use_nhwc, self.pad_input)
ssd_print(key=mlperf_log.BACKBONE, value='resnet34')
out_channels = 256
out_size = 38
self.out_chan = [out_channels, 512, 512, 256, 256, 256]
ssd_print(key=mlperf_log.LOC_CONF_OUT_CHANNELS,
value=self.out_chan)
elif backbone == 'resnet50':
self.model = ResNet50(self.use_nhwc, self.pad_input)
out_channels = 1024
out_size = 38
self.l2norm4 = L2Norm()
self.out_chan = [out_channels, 1024, 512, 512, 256, 256]
else:
            raise ValueError('Invalid backbone chosen')
self._build_additional_features(out_size, self.out_chan)
# after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2
# classifer 1, 2, 3, 4, 5 ,6
self.num_defaults = [4, 6, 6, 6, 4, 4]
ssd_print(key=mlperf_log.NUM_DEFAULTS_PER_CELL,
value=self.num_defaults)
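        # Sanity check on the count quoted in the class docstring: the defaults per
        # cell above, applied to the 38x38, 19x19, 10x10, 5x5, 3x3 and 1x1 feature
        # maps, give 38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4 = 8732 boxes.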
self.loc = []
self.conf = []
for nd, oc in zip(self.num_defaults, self.out_chan):
self.loc.append(nn.Conv2d(oc, nd*4, kernel_size=3, padding=1))
self.conf.append(nn.Conv2d(oc, nd*label_num, kernel_size=3, padding=1))
self.loc = nn.ModuleList(self.loc)
self.conf = nn.ModuleList(self.conf)
# intitalize all weights
self._init_weights()
def _build_additional_features(self, input_size, input_channels):
idx = 0
if input_size == 38:
idx = 0
elif input_size == 19:
idx = 1
elif input_size == 10:
idx = 2
self.additional_blocks = []
if self.use_nhwc:
conv_fn = Conv2d_NHWC
else:
conv_fn = nn.Conv2d
#
if input_size == 38:
self.additional_blocks.append(nn.Sequential(
conv_fn(input_channels[idx], 256, kernel_size=1),
nn.ReLU(inplace=True),
conv_fn(256, input_channels[idx+1], kernel_size=3, padding=1, stride=2),
nn.ReLU(inplace=True),
))
idx += 1
self.additional_blocks.append(nn.Sequential(
conv_fn(input_channels[idx], 256, kernel_size=1),
nn.ReLU(inplace=True),
conv_fn(256, input_channels[idx+1], kernel_size=3, padding=1, stride=2),
nn.ReLU(inplace=True),
))
idx += 1
# conv9_1, conv9_2
self.additional_blocks.append(nn.Sequential(
conv_fn(input_channels[idx], 128, kernel_size=1),
nn.ReLU(inplace=True),
conv_fn(128, input_channels[idx+1], kernel_size=3, padding=1, stride=2),
nn.ReLU(inplace=True),
))
idx += 1
# conv10_1, conv10_2
self.additional_blocks.append(nn.Sequential(
conv_fn(input_channels[idx], 128, kernel_size=1),
nn.ReLU(inplace=True),
conv_fn(128, input_channels[idx+1], kernel_size=3),
nn.ReLU(inplace=True),
))
idx += 1
# Only necessary in VGG for now
if input_size >= 19:
# conv11_1, conv11_2
self.additional_blocks.append(nn.Sequential(
conv_fn(input_channels[idx], 128, kernel_size=1),
nn.ReLU(inplace=True),
conv_fn(128, input_channels[idx+1], kernel_size=3),
nn.ReLU(inplace=True),
))
self.additional_blocks = nn.ModuleList(self.additional_blocks)
def _init_weights(self):
        addn_blocks = [*self.additional_blocks]
        layers = [*self.loc, *self.conf]
# Need to handle additional blocks differently in NHWC case due to xavier initialization
for layer in addn_blocks:
for param in layer.parameters():
if param.dim() > 1:
if self.use_nhwc:
# xavier_uniform relies on fan-in/-out, so need to use NCHW here to get
# correct values (K, R) instead of the correct (K, C)
nn.init.xavier_uniform_(param.permute(0, 3, 1, 2).contiguous())
# Now permute correctly-initialized param back to NHWC
param = param.permute(0, 2, 3, 1).contiguous()
else:
nn.init.xavier_uniform_(param)
for layer in layers:
for param in layer.parameters():
if param.dim() > 1: nn.init.xavier_uniform_(param)
# Shape the classifier to the view of bboxes
def bbox_view(self, src, loc, conf):
ret = []
for s, l, c in zip(src, loc, conf):
if self.use_nhwc:
s = s.permute(0, 3, 1, 2).contiguous()
ret.append((l(s).view(s.size(0), 4, -1), c(s).view(s.size(0), self.label_num, -1)))
locs, confs = list(zip(*ret))
locs, confs = torch.cat(locs, 2).contiguous(), torch.cat(confs, 2).contiguous()
return locs, confs
def forward(self, data):
layers = self.model(data)
# last result from network goes into additional blocks
x = layers[-1]
# If necessary, transpose back to NCHW
additional_results = []
for i, l in enumerate(self.additional_blocks):
x = l(x)
additional_results.append(x)
# do we need the l2norm on the first result?
src = [*layers, *additional_results]
# Feature Map 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
locs, confs = self.bbox_view(src, self.loc, self.conf)
# For SSD 300, shall return nbatch x 8732 x {nlabels, nlocs} results
return locs, confs
| apache-2.0 |