Modalities: Text
Size: 10K - 100K

Column schema:
repo_name: string (length 6 to 103)
path: string (length 5 to 191)
copies: string (length 1 to 4)
size: string (length 4 to 6)
content: string (length 986 to 970k)
license: string (15 classes)

repo_name | path | copies | size | content | license
---|---|---|---|---|---|
vdumoulin/fuel | fuel/transformers/sequences.py | 7 | 4963 | from fuel.transformers import Transformer
class Window(Transformer):
"""Return pairs of source and target windows from a stream.
This data stream wrapper takes as an input a data stream outputting
sequences of potentially varying lengths (e.g. sentences, audio tracks,
etc.). It then returns two sliding windows (source and target) over
these sequences.
For example, to train an n-gram model set `source_window` to n,
`target_window` to 1, no offset, and `overlapping` to false. This will
give chunks [1, N] and [N + 1]. To train an RNN you often want to set
the source and target window to the same size and use an offset of 1
with overlap; this gives chunks [1, N] and [2, N + 1].
Parameters
----------
offset : int
The offset from the source window where the target window starts.
source_window : int
The size of the source window.
target_window : int
The size of the target window.
overlapping : bool
If true, the source and target windows overlap i.e. the offset of
the target window is taken to be from the beginning of the source
window. If false, the target window offset is taken to be from the
end of the source window.
data_stream : :class:`.DataStream` instance
The data stream providing sequences. Each example is assumed to be
an object that supports slicing.
target_source : str, optional
This data stream adds a new source for the target words. By default
this source is 'targets'.
"""
def __init__(self, offset, source_window, target_window,
overlapping, data_stream, target_source='targets', **kwargs):
if not data_stream.produces_examples:
raise ValueError('the wrapped data stream must produce examples, '
'not batches of examples.')
if len(data_stream.sources) > 1:
raise ValueError('{} expects only one source'
.format(self.__class__.__name__))
super(Window, self).__init__(data_stream, produces_examples=True,
**kwargs)
self.sources = self.sources + (target_source,)
self.offset = offset
self.source_window = source_window
self.target_window = target_window
self.overlapping = overlapping
self.sentence = []
self._set_index()
def _set_index(self):
"""Set the starting index of the source window."""
self.index = 0
# If offset is negative, target window might start before 0
self.index = -min(0, self._get_target_index())
def _get_target_index(self):
"""Return the index where the target window starts."""
return (self.index + self.source_window * (not self.overlapping) +
self.offset)
def _get_end_index(self):
"""Return the end of both windows."""
return max(self.index + self.source_window,
self._get_target_index() + self.target_window)
def get_data(self, request=None):
if request is not None:
raise ValueError
while not self._get_end_index() <= len(self.sentence):
self.sentence, = next(self.child_epoch_iterator)
self._set_index()
source = self.sentence[self.index:self.index + self.source_window]
target = self.sentence[self._get_target_index():
self._get_target_index() + self.target_window]
self.index += 1
return (source, target)
class NGrams(Window):
"""Return n-grams from a stream.
This data stream wrapper takes as an input a data stream outputting
sentences. From these sentences n-grams of a fixed order (e.g. bigrams,
trigrams, etc.) are extracted and returned. It also creates a
``targets`` data source. For each example, the target is the word
immediately following that n-gram. It is normally used for language
modeling, where we try to predict the next word from the previous *n*
words.
.. note::
Unlike the :class:`Window` stream, the target returned by
:class:`NGrams` is a single element instead of a window.
Parameters
----------
ngram_order : int
The order of the n-grams to output e.g. 3 for trigrams.
data_stream : :class:`.DataStream` instance
The data stream providing sentences. Each example is assumed to be
a list of integers.
target_source : str, optional
This data stream adds a new source for the target words. By default
this source is 'targets'.
"""
def __init__(self, ngram_order, *args, **kwargs):
super(NGrams, self).__init__(
0, ngram_order, 1, False, *args, **kwargs)
def get_data(self, *args, **kwargs):
source, target = super(NGrams, self).get_data(*args, **kwargs)
return (source, target[0])
| mit |
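A minimal usage sketch of the transformers above (not from the original file), assuming Fuel's IterableDataset/DataStream API and integer-encoded sentences:

from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from fuel.transformers.sequences import NGrams, Window

sentences = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10]]  # illustrative integer-encoded sentences
stream = DataStream(IterableDataset(sentences))

# Trigram stream: each source is 3 consecutive ids, the target is the next id.
trigrams = NGrams(3, stream)
for source, target in trigrams.get_epoch_iterator():
    print(source, target)  # e.g. [1, 2, 3] 4

# RNN-style windows: equal-sized source/target shifted by one position, as in the docstring.
rnn_stream = Window(1, 3, 3, True, DataStream(IterableDataset(sentences)))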
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/utilities/tune.py | 1 | 9729 | """
Tunes a set of models against a training set to identify the best
model/configuration.
Usage:
tune <params-config> <features> <label>
[--observations=<path>]
[--scoring=<type>]
[--test-prop=<prop>]
[--folds=<num>]
[--report=<path>]
[--label-type=<type>]
[--processes=<num>]
[--cv-timeout=<mins>]
[--scale-features]
[--verbose] [--debug]
Options:
<params-config> The path to a YAML configuration file containing the
models and parameter values to search when tuning
<features> The classpath to a feature_list to use when
interpreting the feature values of the observations
<label> The name of the field to be predicted
--observations=<path> The path to a file containing observations to train
and test against. [default: <stdin>]
--scoring=<type> The type of scoring strategy to optimize for when
choosing parameter sets [default: roc_auc]
--folds=<num> The number of cross-validation folds to try
[default: 5]
--report=<path> Path to a file to write the tuning report to
[default: <stdout>]
--processes=<num> The number of parallel processes to start for
model building [default: <cpu-count>]
--cv-timeout=<mins> The number of minutes to wait for a model to
cross-validate before timing out
[default: <forever>]
--scale-features Scales the feature values before tuning
--verbose Print progress information to stderr
--debug Print debug information to stderr
"""
import datetime
import json
import logging
import multiprocessing
import sys
import time
import traceback
from collections import defaultdict
import docopt
import numpy
import yamlconf
from sklearn import cross_validation, grid_search, preprocessing
from tabulate import tabulate
from . import metrics
from .. import __version__
from ..dependencies import solve
from .util import Timeout, read_observations
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
params_config = yamlconf.load(open(args['<params-config>']))
features_path = args['<features>']
features = yamlconf.import_path(features_path)
if args['--observations'] == "<stdin>":
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--observations']))
logger.info("Reading feature values & labels...")
label_name = args['<label>']
value_labels = \
[(list(solve(features, cache=ob['cache'])), ob[label_name])
for ob in observations]
# Get a specialized scorer if we have one
scoring = metrics.SCORERS.get(args['--scoring'], args['--scoring'])
folds = int(args['--folds'])
if args['--report'] == "<stdout>":
report = sys.stdout
else:
report = open(args['--report'], "w")
if args['--processes'] == "<cpu-count>":
processes = multiprocessing.cpu_count()
else:
processes = int(args['--processes'])
if args['--cv-timeout'] == "<forever>":
cv_timeout = None
else:
cv_timeout = float(args['--cv-timeout']) * 60 # Convert to seconds
scale_features = args['--scale-features']
verbose = args['--verbose']
run(params_config, features_path, value_labels, scoring, folds,
report, processes, cv_timeout, scale_features, verbose)
def run(params_config, features_path, value_labels, scoring, folds,
report, processes, cv_timeout, scale_features, verbose):
if scale_features:
logger.debug("Scaling features...")
ss = preprocessing.StandardScaler()
feature_values, labels = (list(vect) for vect in zip(*value_labels))
scaled_feature_values = ss.fit_transform(feature_values)
value_labels = list(zip(scaled_feature_values, labels))
# Prepare the worker pool
logger.debug("Starting up multiprocessing pool (processes={0})"
.format(processes))
pool = multiprocessing.Pool(processes=processes)
# Start writing the model tuning report
possible_labels = set(label for _, label in value_labels)
report.write("# Model tuning report\n")
report.write("- Revscoring version: {0}\n".format(__version__))
report.write("- Features: {0}\n".format(features_path))
report.write("- Date: {0}\n".format(datetime.datetime.now().isoformat()))
report.write("- Observations: {0}\n".format(len(value_labels)))
report.write("- Labels: {0}\n".format(json.dumps(list(possible_labels))))
report.write("- Scoring: {0}\n".format(scoring))
report.write("- Folds: {0}\n".format(folds))
report.write("\n")
# For each estimator and paramset, submit the job.
cv_result_sets = defaultdict(lambda: [])
for name, estimator, param_grid in _estimator_param_grid(params_config):
logger.debug("Submitting jobs for {0}:".format(name))
for params in param_grid:
logger.debug("\tsubmitting {0}..."
.format(format_params(params)))
result = pool.apply_async(_cross_validate,
[value_labels, estimator, params],
{'cv_timeout': cv_timeout,
'scoring': scoring, 'folds': folds})
cv_result_sets[name].append((params, result))
# Barrier synchronization
logger.info("Running gridsearch for {0} model/params pairs ..."
.format(sum(len(p_r) for p_r in cv_result_sets)))
grid_scores = []
for name, param_results in cv_result_sets.items():
for params, result in param_results:
scores = result.get() # This is a line that blocks
grid_scores.append((name, params, scores.mean(), scores.std()))
# Write the rest of the report! First, print the top 10 combinations
report.write("# Top scoring configurations\n")
grid_scores.sort(key=lambda gs: gs[2], reverse=True)
table = tabulate(
((name, round(mean_score, 3), round(std_score, 3),
format_params(params))
for name, params, mean_score, std_score in
grid_scores[:10]),
headers=["model", "mean(scores)", "std(scores)", "params"],
tablefmt="pipe"
)
report.write(table + "\n")
report.write("\n")
# Now print out scores for each model.
report.write("# Models\n")
for name, param_results in cv_result_sets.items():
report.write("## {0}\n".format(name))
param_scores = ((p, r.get()) for p, r in param_results)
param_stats = [(p, s.mean(), s.std()) for p, s in param_scores]
param_stats.sort(key=lambda v: v[1], reverse=True)
table = tabulate(
((round(mean_score, 3), round(std_score, 3),
format_params(params))
for params, mean_score, std_score in
param_stats),
headers=["mean(scores)", "std(scores)", "params"],
tablefmt="pipe"
)
report.write(table + "\n")
report.write("\n")
report.close()
def format_params(doc):
return ", ".join("{0}={1}".format(k, json.dumps(v))
for k, v in doc.items())
def _estimator_param_grid(params_config):
for name, config in params_config.items():
try:
EstimatorClass = yamlconf.import_module(config['class'])
estimator = EstimatorClass()
except Exception:
logger.warn("Could not load estimator {0}"
.format(config['class']))
logger.warn("Exception:\n" + traceback.format_exc())
continue
if not hasattr(estimator, "fit"):
logger.warn("Estimator {0} does not have a fit() method."
.format(config['class']))
continue
param_grid = grid_search.ParameterGrid(config['params'])
yield name, estimator, param_grid
def _cross_validate(value_labels, estimator, params, scoring="roc_auc",
folds=5, cv_timeout=None, verbose=False):
start = time.time()
feature_values, labels = (list(vect) for vect in zip(*value_labels))
estimator.set_params(**params)
try:
logger.debug("Running cross-validation for " +
"{0} with timeout of {1} seconds"
.format(estimator.__class__.__name__, cv_timeout))
with Timeout(cv_timeout):
scores = cross_validation.cross_val_score(
estimator, feature_values,
labels, scoring=scoring,
cv=folds)
duration = time.time() - start
logger.debug("Cross-validated {0} with {1} in {2} minutes: {3} ({4})"
.format(estimator.__class__.__name__,
format_params(params),
round(duration / 60, 3),
round(scores.mean(), 3),
round(scores.std(), 3)))
return scores
except Exception:
logger.warn("Could not cross-validate estimator {0}"
.format(estimator.__class__.__name__))
logger.warn("Exception:\n" + traceback.format_exc())
return numpy.array([0] * folds)
| mit |
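A sketch of what the <params-config> YAML resolves to once loaded: _estimator_param_grid above expects a mapping from a model name to a 'class' import path and a 'params' grid. The estimator names and values below are illustrative assumptions, not taken from the original.

# Shape of params_config after yamlconf.load(open(args['<params-config>'])):
params_config = {
    'gradient_boosting': {
        'class': 'sklearn.ensemble.GradientBoostingClassifier',
        'params': {'n_estimators': [100, 300], 'max_depth': [3, 5]},
    },
    'logistic_regression': {
        'class': 'sklearn.linear_model.LogisticRegression',
        'params': {'C': [0.1, 1.0, 10.0]},
    },
}
# _estimator_param_grid(params_config) yields one (name, estimator, ParameterGrid) triple per entry;
# each ParameterGrid enumerates every combination of the listed parameter values.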
lakshayg/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/categorical.py | 151 | 4269 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements preprocessing transformers for categorical variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
# pylint: disable=g-bad-import-order
from . import categorical_vocabulary
from ..learn_io.data_feeder import setup_processor_data_feeder
# pylint: enable=g-bad-import-order
class CategoricalProcessor(object):
"""Maps documents to sequences of word ids.
As a common convention, NaN values are handled as unknown tokens.
Both float('nan') and np.nan are accepted.
"""
def __init__(self, min_frequency=0, share=False, vocabularies=None):
"""Initializes a CategoricalProcessor instance.
Args:
min_frequency: Minimum frequency of categories in the vocabulary.
share: Share vocabulary between variables.
vocabularies: list of CategoricalVocabulary objects for each variable in
the input dataset.
Attributes:
vocabularies_: list of CategoricalVocabulary objects.
"""
self.min_frequency = min_frequency
self.share = share
self.vocabularies_ = vocabularies
def freeze(self, freeze=True):
"""Freeze or unfreeze all vocabularies.
Args:
freeze: Boolean, indicate if vocabularies should be frozen.
"""
for vocab in self.vocabularies_:
vocab.freeze(freeze)
def fit(self, x, unused_y=None):
"""Learn a vocabulary dictionary of all categories in `x`.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
unused_y: to match fit format signature of estimators.
Returns:
self
"""
x = setup_processor_data_feeder(x)
for row in x:
# Create vocabularies if not given.
if self.vocabularies_ is None:
# If not share, one per column, else one shared across.
if not self.share:
self.vocabularies_ = [
categorical_vocabulary.CategoricalVocabulary() for _ in row
]
else:
vocab = categorical_vocabulary.CategoricalVocabulary()
self.vocabularies_ = [vocab for _ in row]
for idx, value in enumerate(row):
# Nans are handled as unknowns.
if (isinstance(value, float) and math.isnan(value)) or value == np.nan:
continue
self.vocabularies_[idx].add(value)
if self.min_frequency > 0:
for vocab in self.vocabularies_:
vocab.trim(self.min_frequency)
self.freeze()
return self
def fit_transform(self, x, unused_y=None):
"""Learn the vocabulary dictionary and return indexies of categories.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
unused_y: to match fit_transform signature of estimators.
Returns:
x: iterable, [n_samples]. Category-id matrix.
"""
self.fit(x)
return self.transform(x)
def transform(self, x):
"""Transform documents to category-id matrix.
Converts categories to ids given the fitted vocabulary from `fit` or
the one provided in the constructor.
Args:
x: numpy matrix or iterable of lists/numpy arrays.
Yields:
x: iterable, [n_samples]. Category-id matrix.
"""
self.freeze()
x = setup_processor_data_feeder(x)
for row in x:
output_row = []
for idx, value in enumerate(row):
# Return <UNK> when it's Nan.
if (isinstance(value, float) and math.isnan(value)) or value == np.nan:
output_row.append(0)
continue
output_row.append(self.vocabularies_[idx].get(value))
yield np.array(output_row, dtype=np.int64)
| apache-2.0 |
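A short usage sketch of CategoricalProcessor (an illustration, assuming the old tf.contrib.learn import path of this file is still available):

import numpy as np
from tensorflow.contrib.learn.python.learn.preprocessing.categorical import CategoricalProcessor

rows = [['red', 'small'], ['blue', 'large'], [np.nan, 'small']]  # illustrative categorical rows
processor = CategoricalProcessor()
for id_row in processor.fit_transform(rows):
    print(id_row)  # one int64 id array per row; NaN values map to id 0 (<UNK>)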
musically-ut/statsmodels | examples/python/tsa_dates.py | 29 | 1169 |
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# ## Using Pandas
#
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
| bsd-3-clause |
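pd.TimeSeries was removed from later pandas releases; on a current pandas the same step can be written with pd.Series (a sketch, leaving the rest of the example unchanged):

endog = pd.Series(data.endog, index=dates)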
jpzk/evopy | evopy/examples/experiments/cv_ppv_dsesscv/plot_precisions.py | 1 | 4256 | '''
This file is part of evopy.
Copyright 2012 - 2013, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from pickle import load
from copy import deepcopy
from numpy import matrix, log10, array
from scipy.stats import wilcoxon
from itertools import chain
from pylab import errorbar
from matplotlib.backends.backend_pdf import PdfPages
from evopy.strategies.ori_dses_svc_repair import ORIDSESSVCR
from evopy.strategies.ori_dses_svc import ORIDSESSVC
from evopy.strategies.ori_dses import ORIDSES
from evopy.simulators.simulator import Simulator
from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1
from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2
from evopy.problems.schwefels_problem_26 import SchwefelsProblem26
from evopy.problems.tr_problem import TRProblem
from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from sklearn.cross_validation import KFold
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.operators.scaling.scaling_dummy import ScalingDummy
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.or_combinator import ORCombinator
from evopy.operators.termination.accuracy import Accuracy
from evopy.operators.termination.generations import Generations
from evopy.operators.termination.convergence import Convergence
from evopy.helper.timeseries_aggregator import TimeseriesAggregator
import matplotlib.pyplot as plt
from setup import *
precisionfile = file("output/precision_file.save", "r")
precisions = load(precisionfile)
none = lambda x : type(x) != type(None)
for problem in precisions.keys():
figure_accs = plt.figure(figsize=(8,6), dpi=10, facecolor="w", edgecolor="k")
plt.xlabel("Generation")
plt.ylabel("Gemittelter Positiver Vorhersagewert")
plt.xlim([0, 50])
plt.ylim([0.0, 1.0])
o_colors = {
get_method_TR_none: "g",\
get_method_TR_nor: "k",\
get_method_TR_ssc: "#044977",\
get_method_SphereProblemR1_none: "g",\
get_method_SphereProblemR1_nor: "k",\
get_method_SphereProblemR1_ssc: "#044977",\
get_method_SphereProblemR2_none: "g",\
get_method_SphereProblemR2_nor: "k",\
get_method_SphereProblemR2_ssc: "#044977",\
get_method_Schwefel26_none: "g",\
get_method_Schwefel26_nor: "k",\
get_method_Schwefel26_ssc: "#044977"}
o_markers = {
get_method_TR_none: "x",\
get_method_TR_nor: "+",\
get_method_TR_ssc: ".",\
get_method_SphereProblemR1_none: "x",\
get_method_SphereProblemR1_nor: "+",\
get_method_SphereProblemR1_ssc: ".",\
get_method_SphereProblemR2_none: "x",\
get_method_SphereProblemR2_nor: "+",\
get_method_SphereProblemR2_ssc: ".",\
get_method_Schwefel26_none: "x",\
get_method_Schwefel26_nor: "+",\
get_method_Schwefel26_ssc: "."}
optimizers = precisions[problem].keys()
for index, optimizer in enumerate(optimizers):
precisions_po = precisions[problem][optimizer]
precisions_agg, errors_agg =\
TimeseriesAggregator(precisions_po).get_aggregate()
generations = range(0, len(precisions_agg))
eb = errorbar(generations,\
precisions_agg,\
marker=o_markers[optimizer],
color=o_colors[optimizer],\
ecolor="#CCCCCC",\
linestyle="none",
yerr=errors_agg)
pp = PdfPages("output/p_%s.pdf" % str(problem).split('.')[-1])
plt.savefig(pp, format='pdf')
pp.close()
| gpl-3.0 |
lakshayg/tensorflow | tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py | 12 | 2055 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fashion-MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy as np
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = os.path.join('datasets', 'fashion-mnist')
base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for given_file in files:
paths.append(
get_file(given_file, origin=base + given_file, cache_subdir=dirname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
| apache-2.0 |
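A short usage sketch of load_data above; the shapes follow from the reshape calls in the loader and the standard Fashion-MNIST split sizes:

(x_train, y_train), (x_test, y_test) = load_data()
# x_train: (60000, 28, 28) uint8 images, y_train: (60000,) labels in 0..9
# x_test:  (10000, 28, 28) uint8 images, y_test:  (10000,) labels in 0..9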
musically-ut/statsmodels | statsmodels/datasets/statecrime/data.py | 25 | 3128 | #! /usr/bin/env python
"""Statewide Crime Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """::
Number of observations: 51
Number of variables: 8
Variable name definitions:
state
All 50 states plus DC.
violent
Rate of violent crimes / 100,000 population. Includes murder, forcible
rape, robbery, and aggravated assault. Numbers for Illinois and
Minnesota do not include forcible rapes. Footnote included with the
American Statistical Abstract table reads:
"The data collection methodology for the offense of forcible
rape used by the Illinois and the Minnesota state Uniform Crime
Reporting (UCR) Programs (with the exception of Rockford, Illinois,
and Minneapolis and St. Paul, Minnesota) does not comply with
national UCR guidelines. Consequently, their state figures for
forcible rape and violent crime (of which forcible rape is a part)
are not published in this table."
murder
Rate of murders / 100,000 population.
hs_grad
Percent of population having graduated from high school or higher.
poverty
% of individuals below the poverty line
white
Percent of population that is one race - white only. From 2009 American
Community Survey
single
Calculated from 2009 1-year American Community Survey obtained
from Census. Variable is Male householder, no wife present, family
household combined with Female household, no husband present, family
household, divided by the total number of Family households.
urban
% of population in Urbanized Areas as of 2010 Census. Urbanized
Areas are areas of 50,000 or more people."""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5],
dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5],
dtype=float, index_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/statecrime.csv', 'rb'),
delimiter=",", names=True, dtype=None)
return data
| bsd-3-clause |
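A short usage sketch, assuming the package exposes this module as statsmodels.datasets.statecrime; which columns land in endog and exog is determined by the endog_idx/exog_idx values above:

from statsmodels.datasets import statecrime

data = statecrime.load_pandas()
print(data.endog.head())  # the column selected by endog_idx=2
print(data.exog.head())   # the columns selected by exog_idx=[7, 4, 3, 5]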
PAIR-code/recommendation-rudders | hyperbolic-rs/preprocess.py | 1 | 11964 | # Copyright 2017 The Rudders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app, flags
import pickle
import tensorflow as tf
import numpy as np
import random
from tqdm import tqdm
from pathlib import Path
from rudders.relations import Relations
from rudders.datasets import movielens, keen, amazon, amazon_relations, synopsis
from rudders.config import CONFIG
from rudders.utils import set_seed, sort_items_by_popularity, save_as_pickle, add_to_train_split
FLAGS = flags.FLAGS
flags.DEFINE_string('prep_id', default='foobar', help='Name of prep to store')
flags.DEFINE_string('item', default='amazon', help='Item to process: "keen", "gem", "ml-1m", "amazon" or "synopsis"')
flags.DEFINE_string('dataset_path', default='data/amazon', help='Path to raw dataset')
flags.DEFINE_string('amazon_reviews', default='Musical_Instruments_5.json.gz',
help='Name of the 5-core amazon reviews file')
flags.DEFINE_string('amazon_meta', default='meta_Musical_Instruments.json.gz',
help='Name of the Amazon item metadata file')
flags.DEFINE_string('item_item_file', default='Musical_Instruments_th0.6_cosdistances.pickle',
help='Path to the item-item distance file')
flags.DEFINE_boolean('plot_graph', default=False, help='Plots the user-item graph')
flags.DEFINE_boolean('shuffle', default=False, help='Whether to shuffle the interactions or not')
flags.DEFINE_boolean('add_extra_relations', default=True, help='For the amazon dataset, adds extra relations')
flags.DEFINE_boolean('export_splits', default=True, help='Exports (user_id, item_id) pairs of all splits')
flags.DEFINE_integer('min_user_interactions', default=5,
help='Keens users with less than min_user_interactions are filtered')
flags.DEFINE_integer('min_item_interactions', default=2,
help='Keens/gems with less than this interactions are filtered')
flags.DEFINE_integer('max_item_interactions', default=150,
help='Keens/gems with more than this interactions are filtered')
flags.DEFINE_integer('similarity_items_per_item', default=10, help='Amount of similarity items to add per item')
flags.DEFINE_integer('seed', default=42, help='Random seed')
flags.DEFINE_integer('filter_most_popular', default=-1,
help='Filters out most popular keens/gems. If -1 it does not filter')
def plot_graph(samples):
"""Plot user-item graph, setting different colors for items and users."""
import networkx as nx
import matplotlib.pyplot as plt
graph = nx.Graph()
for uid, ints in samples.items():
for iid in ints:
graph.add_edge(uid, iid)
color_map = ["red" if node in samples else "blue" for node in graph]
fig = plt.figure()
pos = nx.spring_layout(graph, iterations=100)
nx.draw(graph, pos, ax=fig.add_subplot(111), node_size=20, node_color=color_map)
plt.show()
def map_raw_ids_to_sequential_ids(samples):
"""
For each unique user or item id, this function creates a mapping to a sequence of numbers starting at 0.
This will be the index of the embeddings in the model.
Item ids will be from 0 to n_items - 1.
User ids will be from n_items to n_items + n_users - 1.
This condition is required to later build the distance matrix
:param samples: dict of <user_id1>: [<item_id1>, <item_id2>, ...]
:return: dicts of {<user_idX>: indexY} and {<item_idX>: indexW}
"""
uid2id, iid2id = {}, {}
sorted_samples = sorted(samples.items(), key=lambda x: x[0])
# first sets items ids only
for _, ints in sorted_samples:
sorted_ints = sorted(ints)
for iid in sorted_ints:
if iid not in iid2id:
iid2id[iid] = len(iid2id)
# users ids come after item ids
for uid, _ in sorted_samples:
if uid not in uid2id:
uid2id[uid] = len(uid2id) + len(iid2id)
return uid2id, iid2id
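# Illustrative example of the mapping above (not part of the original file):
#   samples = {'userA': ['item2', 'item1'], 'userB': ['item1', 'item3']}
#   -> iid2id == {'item1': 0, 'item2': 1, 'item3': 2}   # items take ids 0 .. n_items - 1
#   -> uid2id == {'userA': 3, 'userB': 4}               # users continue from n_items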
def create_splits(samples, relation_id, do_random=False, seed=42):
"""
Splits (user, item) dataset to train, dev and test.
:param samples: Dict of sorted examples.
:param relation_id: number that identifies the user-item interaction relation to form the triplets
:param do_random: Bool whether to extract dev and test by random sampling. If False, dev, test are the last two
items per user.
:return: examples: Dictionary with 'train','dev','test' splits as numpy arrays
containing corresponding (user_id, item_id) pairs, and 'to_skip' to a Dictionary containing filters
for each user.
"""
train, dev, test = [], [], []
for uid, ints in samples.items():
if do_random:
random.seed(seed)
random.shuffle(ints)
if len(ints) >= 3:
test.append((uid, relation_id, ints[-1]))
dev.append((uid, relation_id, ints[-2]))
for iid in ints[:-2]:
train.append((uid, relation_id, iid))
else:
for iid in ints:
train.append((uid, relation_id, iid))
return {
'samples': samples,
'train': np.array(train).astype('int64'),
'dev': np.array(dev).astype('int64'),
'test': np.array(test).astype('int64')
}
def load_item_item_distances(item_item_file_path):
"""Loads item-item distances that were precomputed with item_graph.py."""
print(f"Loading data from {item_item_file_path}")
with tf.io.gfile.GFile(str(item_item_file_path), 'rb') as f:
data = pickle.load(f)
return data["item_item_distances"]
def build_item_item_triplets(item_item_distances_dict, iid2id, top_k):
"""
Builds item-item triplets from the item-item distances
:param item_item_distances_dict: dict of src_iid: [(dst_iid, distance)]
:param iid2id: dict of item ids
:param top_k: adds top_k items per item at most
:return:
"""
triplets = set()
for src_iid, dists in tqdm(item_item_distances_dict.items(), desc="item_item_triplets"):
if src_iid not in iid2id:
continue
src_id = iid2id[src_iid]
sorted_dists = sorted(dists, key=lambda t: t[1])
added = 0
for dst_iid, cos_dist in sorted_dists:
if dst_iid not in iid2id or cos_dist > 0.3:
continue
dst_id = iid2id[dst_iid]
if cos_dist <= 0.1:
triplets.add((src_id, Relations.SEM_HIGH_SIM.value, dst_id))
elif 0.2 >= cos_dist > 0.1:
triplets.add((src_id, Relations.SEM_MEDIUM_SIM.value, dst_id))
else: # 0.3 >= cos_dist > 0.2
triplets.add((src_id, Relations.SEM_LOW_SIM.value, dst_id))
added += 1
if added >= top_k:
break
return list(triplets)
def export_splits(data, to_save_dir, prep_id):
"""Exports (user_id, item_id) pairs of all splits splits"""
split_names = ["train", "dev", "test"]
id2uid, id2iid = data["id2uid"], data["id2iid"]
for split_name in split_names:
split = data[split_name]
if split_name == "train":
split = [(uid, r, iid) for uid, r, iid in split if r == Relations.USER_ITEM.value]
lines = [f"{id2uid[u_id]},{id2iid[i_id]}\n" for u_id, _, i_id in split]
with open(to_save_dir / f"{prep_id}_ui_{split_name}.csv", "w") as f:
f.writelines(lines)
def main(_):
set_seed(FLAGS.seed, set_tf_seed=True)
dataset_path = Path(FLAGS.dataset_path)
if FLAGS.item == "keen":
samples = keen.load_user_keen_interactions(dataset_path, min_user_ints=FLAGS.min_user_interactions,
min_item_ints=FLAGS.min_item_interactions,
max_item_ints=FLAGS.max_item_interactions)
iid2name = keen.build_iid2title(item_id_key="keen_id", item_title_key="keen_title")
elif FLAGS.item == "gem":
samples = keen.load_keen_gems_interactions(dataset_path, min_keen_keen_edges=2, max_keen_keen_edges=1000,
min_overlapping_users=2,
min_keen_ints=FLAGS.min_user_interactions,
min_item_ints=FLAGS.min_item_interactions,
max_item_ints=FLAGS.max_item_interactions)
iid2name = keen.build_iid2title(item_id_key="gem_id", item_title_key="gem_link_title")
elif FLAGS.item == "ml-1m":
samples = movielens.movielens_to_dict(dataset_path)
iid2name = movielens.build_movieid2title(dataset_path)
elif "amazon" in FLAGS.item:
samples = amazon.load_interactions(dataset_path / FLAGS.amazon_reviews)
iid2name = amazon.build_itemid2name(dataset_path / FLAGS.amazon_meta)
elif FLAGS.item == "synopsis":
samples = synopsis.synopsis_to_dict(dataset_path)
iid2name = synopsis.build_movieid2title(dataset_path)
else:
raise ValueError(f"Unknown item: {FLAGS.item}")
if FLAGS.filter_most_popular > 0:
print(f"Filtering {FLAGS.filter_most_popular} most popular items")
sorted_items = sort_items_by_popularity(samples)
iid_to_filter = set([iid for iid, _ in sorted_items[:FLAGS.filter_most_popular]])
samples = {uid: list(set(ints) - iid_to_filter) for uid, ints in samples.items()}
samples = {uid: ints for uid, ints in samples.items() if ints}
if FLAGS.plot_graph:
plot_graph(samples)
return
uid2id, iid2id = map_raw_ids_to_sequential_ids(samples)
id_samples = {}
for uid, ints in samples.items():
if FLAGS.item == "keen" or FLAGS.item == "gem":
ints = sorted(ints)
id_samples[uid2id[uid]] = [iid2id[iid] for iid in ints]
data = create_splits(id_samples, Relations.USER_ITEM.value, do_random=FLAGS.shuffle, seed=FLAGS.seed)
data["iid2name"] = {iid: iid2name.get(iid, "None") for iid in iid2id}
data["id2uid"] = {v: k for k, v in uid2id.items()}
data["id2iid"] = {v: k for k, v in iid2id.items()}
print(f"User item interaction triplets: {len(data['train'])}")
n_entities = len(uid2id) + len(iid2id)
# if there is an item-item graph, we preprocess it
if FLAGS.item_item_file:
item_item_distances_dict = load_item_item_distances(dataset_path / FLAGS.item_item_file)
item_item_triplets = build_item_item_triplets(item_item_distances_dict, iid2id, FLAGS.similarity_items_per_item)
add_to_train_split(data, item_item_triplets)
print(f"Added item-item similarity triplets: {len(item_item_triplets)}")
if "amazon" in FLAGS.item and FLAGS.add_extra_relations:
print("Adding extra relations")
n_entities = amazon_relations.load_relations(dataset_path / FLAGS.amazon_meta, data, iid2id, n_entities)
data["n_entities"] = n_entities
# creates directories to save preprocessed data
print(f"Final training split: {len(data['train'])} triplets")
prep_path = Path(CONFIG["string"]["prep_dir"][1])
prep_path.mkdir(parents=True, exist_ok=True)
to_save_dir = prep_path / FLAGS.item
to_save_dir.mkdir(parents=True, exist_ok=True)
save_as_pickle(to_save_dir / f'{FLAGS.prep_id}.pickle', data)
if FLAGS.export_splits:
export_splits(data, to_save_dir, FLAGS.prep_id)
print("Done!")
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
openplans/shareabouts-api | src/sa_api_v2/tasks.py | 1 | 10690 |
import requests
import ujson as json
from celery import shared_task
from celery.result import AsyncResult
from django.db import transaction
from django.test.client import RequestFactory
from django.utils.timezone import now
from itertools import chain
#from social.apps.django_app.default.models import UserSocialAuth
from .models import DataSnapshotRequest, DataSnapshot, DataSet, User, Place, Submission
from .serializers import SimplePlaceSerializer, SimpleSubmissionSerializer, SimpleDataSetSerializer
from .renderers import CSVRenderer, JSONRenderer, GeoJSONRenderer
import logging
log = logging.getLogger(__name__)
# =========================================================
# Generating snapshots
#
def generate_bulk_content(dataset, submission_set_name, **flags):
renderer_classes = {
'csv': CSVRenderer,
'json': GeoJSONRenderer if submission_set_name == 'places' else JSONRenderer
}
if submission_set_name == 'places':
submissions = dataset.places.all()
serializer = SimplePlaceSerializer(submissions, many=True)
else:
submissions = dataset.submissions.filter(set_name=submission_set_name)
serializer = SimpleSubmissionSerializer(submissions, many=True)
# Construct a request for the serializer context
r_data = {}
for flag_attr, flag_val in flags.items():
if flag_val: r_data[flag_attr] = 'true'
r = RequestFactory().get('', data=r_data)
r.get_dataset = lambda: dataset
# Render the data in each format
serializer.context['request'] = r
data = serializer.data
content = {}
for format, renderer_class in list(renderer_classes.items()):
renderer = renderer_class()
content[format] = renderer.render(data)
return content
@shared_task
def store_bulk_data(request_id):
task_id = store_bulk_data.request.id
log.info('Creating a snapshot request with task id %s' % (task_id,))
datarequest = DataSnapshotRequest.objects.get(pk=request_id)
datarequest.guid = task_id
datarequest.save()
# Generate the content
content = generate_bulk_content(
datarequest.dataset,
datarequest.submission_set,
include_submissions=datarequest.include_submissions,
include_private=datarequest.include_private,
include_invisible=datarequest.include_invisible)
# Store the information
bulk_data = DataSnapshot(
request=datarequest,
csv=content['csv'],
json=content['json'])
bulk_data.save()
datarequest.fulfilled_at = now()
datarequest.save()
return task_id
@shared_task
def bulk_data_status_update(uuid):
"""
A callback task that updates the status of a data snapshot request, whether
successful or not.
"""
taskresult = AsyncResult(uuid)
datarequest = DataSnapshotRequest.objects.get(guid=uuid)
datarequest.status = taskresult.status.lower()
datarequest.save()
@shared_task
def clone_related_dataset_data(orig_dataset_id, new_dataset_id):
qs = DataSet.objects.select_related('owner')\
.filter(id__in=(orig_dataset_id, new_dataset_id))\
.prefetch_related('things',
'things__place',
'things__place__dataset',
'things__place__submitter',
'things__place__submissions',
'things__place__submissions__dataset',
'things__place__submissions__submitter',
'permissions',
'groups',
'groups__submitters',
'groups__permissions',
'keys',
'keys__permissions',
'origins',
'origins__permissions',
)
datasets = list(qs)
if datasets[0].id == orig_dataset_id:
orig_dataset, new_dataset = datasets
else:
new_dataset, orig_dataset = datasets
with transaction.atomic():
orig_dataset.clone_related(onto=new_dataset)
# =========================================================
# Loading a dataset
#
def get_twitter_extra_data(user_data):
return {
'id': user_data.get('provider_id'),
'profile_image_url': user_data.get('avatar_url'),
'access_token': {
'screen_name': user_data.get('username'),
'oauth_token_secret': 'abc',
'oauth_token': '123',
'user_id': user_data.get('provider_id')
},
'name': user_data.get('name')
}
def get_facebook_extra_data(user_data):
return {
'access_token': 'abc123',
'picture': {
"data": {
"url": user_data.get('avatar_url'),
}
},
"id": user_data.get('provider_id'),
"name": user_data.get('name'),
}
def get_or_create_user(user_data, users_map):
if user_data is None:
return
# Check whether the user is already cached
username = user_data.get('username')
user = users_map.get(username)
if user:
return user
# Create and cache the user
user = User.objects.create(username=username, password='!')
users_map[username] = user
# Create a social auth entry for the user, if appropriate
provider = user_data.get('provider_type')
uid = user_data.get('provider_id')
if provider and uid:
UserSocialAuth.objects.create(
user=user,
provider=provider,
uid=uid,
extra_data=
get_twitter_extra_data(user_data)
if provider == 'twitter' else
get_facebook_extra_data(user_data)
)
def preload_users(data):
"""
Construct a mapping from usernames to users for Users that already exist
in the API.
"""
usernames = set()
def collect_username(data):
submitter_data = data.get('submitter')
if submitter_data:
usernames.add(submitter_data.get('username'))
for place_data in data.get('features', []):
collect_username(place_data['properties'])
for _, submissions_data in place_data['properties'].get('submission_sets', {}).items():
for submission_data in submissions_data:
collect_username(submission_data)
users = User.objects.filter(username__in=usernames)
users_map = dict([(user.username, user) for user in users])
return users_map
def list_errors(errors):
errors_list = []
for key, l in list(errors.items()):
if isinstance(l, list):
for msg in l:
errors_list.append('%s: %s' % (key, str(msg)))
else:
msg = l
errors_list.append('%s: %s' % (key, str(msg)))
return errors_list
@shared_task
def load_dataset_archive(dataset_id, archive_url):
dataset = DataSet.objects.get(id=dataset_id)
archive_response = requests.get(archive_url)
if archive_response.status_code == 200:
data = archive_response.json()
# Preload users
users_map = preload_users(data)
with transaction.atomic():
# Construct the dataset from metadata
metadata = data.get('metadata')
if metadata:
metadata.pop('id', None)
metadata.pop('owner', None)
serializer = SimpleDataSetSerializer(dataset, data=data.get('metadata'))
assert serializer.is_valid(), list_errors(serializer.errors)
serializer.save()
# Create a stub view object to use in serializer contexts.
class Stub (object): pass
view = Stub()
view.request = Stub()
view.request.META = {'HTTP_X_SHAREABOUTS_SILENT': 'True'}
view.request.user = Stub()
view.request.user.is_authenticated = lambda: False
# Construct each place and submission individually
for place_data in data.get('features'):
place_data.pop('type', None)
place_data.update(place_data.pop('properties', {}))
place_data.pop('id', None)
place_data.pop('dataset', None)
place_data.pop('created_datetime', None)
place_data.pop('updated_datetime', None)
submission_sets_data = place_data.pop('submission_sets', {})
submitter_data = place_data.pop('submitter', None)
serializer_context = {'view': view, 'request': view.request}
serializer = SimplePlaceSerializer(data=place_data, context=serializer_context)
assert serializer.is_valid(), list_errors(serializer.errors)
place = Place()
for attr, value in serializer.validated_data.items():
setattr(place, attr, value)
place.dataset = dataset
place.submitter = get_or_create_user(submitter_data, users_map)
place.save(silent=True, reindex=False)
for set_name, submissions_data in submission_sets_data.items():
for submission_data in submissions_data:
submission_data.pop('id', None)
submission_data.pop('place', None)
submission_data.pop('dataset', None)
submission_data.pop('attachments', None)
submission_data.pop('created_datetime', None)
submission_data.pop('updated_datetime', None)
submitter_data = submission_data.pop('submitter', None)
serializer_context = {'view': view, 'request': view.request}
serializer = SimpleSubmissionSerializer(data=submission_data, context=serializer_context)
assert serializer.is_valid(), list_errors(serializer.errors)
submission = Submission()
for attr, value in serializer.validated_data.items():
setattr(submission, attr, value)
submission.set_name = set_name
submission.place = place
submission.dataset = dataset
submission.submitter = get_or_create_user(submitter_data, users_map)
submission.save(silent=True, reindex=False)
dataset.reindex()
# Load meta-data like permissions and such
# metadata = data.get('metadata')
# for permission_data in metadata.get('permissions'):
| gpl-3.0 |
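A sketch of how the snapshot tasks above can be chained, assuming a standard Celery setup; the request object is illustrative. store_bulk_data returns its own task id, so a linked callback receives exactly the uuid that bulk_data_status_update expects:

result = store_bulk_data.apply_async(
    args=[datarequest.pk],
    link=bulk_data_status_update.s(),        # on success: called with the returned task id
    link_error=bulk_data_status_update.s(),  # on failure: called with the failed task's id
)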
kastnerkyle/pylearn2 | pylearn2/datasets/stl10.py | 1 | 5304 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils import contains_nan
class STL10(dense_design_matrix.DenseDesignMatrix):
"""
The STL-10 dataset
Adam Coates, Honglak Lee, Andrew Y. Ng An Analysis of Single Layer
Networks in Unsupervised Feature Learning AISTATS, 2011
http://www.stanford.edu/~acoates//stl10/
When reporting results on this dataset, you are meant to use a somewhat
unusual evaluation procedure.
Use STL10(which_set='train') to load the training set. Then restrict the
training set to one of the ten folds using the restrict function below. You
must then train only on the data from that fold.
For the test set, report the average test set performance over the ten
trials obtained by training on each of the ten folds.
The folds here do not define the splits you should use for cross
validation. You are free to make your own split within each fold.
Parameters
----------
which_set : WRITEME
center : WRITEME
example_range : WRITEME
"""
def __init__(self, which_set, center=False, example_range=None):
"""
.. todo::
WRITEME
"""
if which_set == 'train':
train = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/train.mat')
# Load the class names
self.class_names = [array[0].encode('utf-8')
for array in train['class_names'][0]]
# Load the fold indices
fold_indices = train['fold_indices']
assert fold_indices.shape == (1, 10)
self.fold_indices = np.zeros((10, 1000), dtype='uint16')
for i in xrange(10):
indices = fold_indices[0, i]
assert indices.shape == (1000, 1)
assert indices.dtype == 'uint16'
self.fold_indices[i, :] = indices[:, 0]
# The data is stored as uint8
# If we leave it as uint8, it will cause the CAE to silently fail
# since theano will treat derivatives wrt X as 0
X = np.cast['float32'](train['X'])
assert X.shape == (5000, 96 * 96 * 3)
if example_range is not None:
X = X[example_range[0]:example_range[1], :]
# this is uint8
y = train['y'][:, 0]
assert y.shape == (5000,)
elif which_set == 'test':
test = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/test.mat')
# Load the class names
self.class_names = [array[0].encode('utf-8')
for array in test['class_names'][0]]
# The data is stored as uint8
# If we leave it as uint8, it will cause the CAE to silently fail
# since theano will treat derivatives wrt X as 0
X = np.cast['float32'](test['X'])
assert X.shape == (8000, 96 * 96 * 3)
if example_range is not None:
X = X[example_range[0]:example_range[1], :]
# this is uint8
y = test['y'][:, 0]
assert y.shape == (8000,)
elif which_set == 'unlabeled':
unlabeled = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/'
'unlabeled.mat')
X = unlabeled['X']
# this file is stored in HDF format, which transposes everything
assert X.shape == (96 * 96 * 3, 100000)
assert X.dtype == 'uint8'
if example_range is None:
X = X.value
else:
X = X.value[:, example_range[0]:example_range[1]]
X = np.cast['float32'](X.T)
unlabeled.close()
y = None
else:
raise ValueError('"' + which_set + '" is not an STL10 dataset. '
'Recognized values are "train", "test", and '
'"unlabeled".')
if center:
X -= 127.5
view_converter = dense_design_matrix.DefaultViewConverter((96, 96, 3))
super(STL10, self).__init__(X=X, y=y, view_converter=view_converter)
for i in xrange(self.X.shape[0]):
mat = X[i:i + 1, :]
topo = self.get_topological_view(mat)
for j in xrange(topo.shape[3]):
temp = topo[0, :, :, j].T.copy()
topo[0, :, :, j] = temp
mat = self.get_design_matrix(topo)
X[i:i + 1, :] = mat
assert not contains_nan(self.X)
def restrict(dataset, fold):
"""
Restricts the dataset to use the specified fold (1 to 10).
dataset should be the training set.
"""
fold_indices = dataset.fold_indices
assert fold_indices.shape == (10, 1000)
idxs = fold_indices[fold, :] - 1
dataset.X = dataset.X[idxs, :].copy()
assert dataset.X.shape[0] == 1000
dataset.y = dataset.y[idxs, ...].copy()
assert dataset.y.shape[0] == 1000
return dataset
| bsd-3-clause |
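A sketch of the evaluation protocol described in the STL10 docstring, using the restrict helper above; train_and_score stands in for whatever per-fold training routine you use (it is not defined in this file):

from copy import deepcopy

train_set = STL10(which_set='train')
test_set = STL10(which_set='test')

scores = []
for fold in range(10):
    fold_train = restrict(deepcopy(train_set), fold)       # 1000 labeled examples per fold
    scores.append(train_and_score(fold_train, test_set))   # hypothetical training routine
print(sum(scores) / len(scores))                           # report the mean over the ten folds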
nicproulx/mne-python | examples/connectivity/plot_mixed_source_space_connectity.py | 3 | 6976 | """
===============================================================================
Compute mixed source space connectivity and visualize it using a circular graph
===============================================================================
This example computes the all-to-all connectivity between 75 regions in
a mixed source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions.
"""
# Author: Annalisa Pascarella <a.pascarella@iac.cnr.it>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
from mne import setup_volume_source_space, setup_source_space
from mne import make_forward_solution
from mne.io import read_raw_fif
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
# Set dir
data_path = sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_raw = data_dir + '/sample_audvis_filt-0-40_raw.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_cov = data_dir + '/ernoise-cov.fif'
fname_event = data_dir + '/sample_audvis_filt-0-40_raw-eve.fif'
# List of sub structures we are interested in. We select only the
# sub structures we want to include in the source space
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Setup a surface-based source space
src = setup_source_space(subject, fname=None, subjects_dir=subjects_dir,
spacing='oct6', add_dist=False)
# Setup a volume source space
# set pos=7.0 for speed issue
vol_src = setup_volume_source_space(subject, mri=fname_aseg,
pos=7.0,
bem=fname_model,
volume_label=labels_vol,
subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src
# compute the fwd matrix
fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False,
n_jobs=1)
# Load data
raw = read_raw_fif(fname_raw, preload=True)
noise_cov = mne.read_cov(fname_cov)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute inverse solution and for each epoch
snr = 1.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
# Compute inverse operator
inverse_operator = make_inverse_operator(raw.info, fwd, noise_cov,
loose=None, depth=None,
fixed=False)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, inv_method,
pick_ori=None, return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(subject, parc=parc,
subjects_dir=subjects_dir)
# Average the source estimates within each label of the cortical parcellation
# and each sub structures contained in the src space
# If mode = 'mean_flip' this option is used only for the cortical label
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels_parc, src,
mode='mean_flip',
allow_empty=True,
return_generator=False)
# We compute the connectivity in the alpha band and plot it using a circular
# graph layout
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# We create a list of Label containing also the sub structures
labels_aseg = mne.get_volume_labels_from_src(src, subject, subjects_dir)
labels = labels_parc + labels_aseg
# read colors
node_colors = [label.color for label in labels]
# We reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
rh_labels = [name for name in label_names if name.endswith('rh')]
# Get the y-location of the label
label_ypos_lh = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos_lh.append(ypos)
try:
idx = label_names.index('Brain-Stem')
ypos = np.mean(labels[idx].pos[:, 1])
lh_labels.append('Brain-Stem')
label_ypos_lh.append(ypos)
except ValueError:
pass
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos_lh, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels
if label != 'Brain-Stem' and label[:-2] + 'rh' in rh_labels]
# Save the plot order
node_order = list()
node_order = lh_labels[::-1] + rh_labels
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) // 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
conmat = con[:, :, 0]
plot_connectivity_circle(conmat, label_names, n_lines=300,
node_angles=node_angles, node_colors=node_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)')
# Uncomment the following line to save the figure
'''
import matplotlib.pyplot as plt
plt.savefig('circle.png', facecolor='black')
'''
| bsd-3-clause |
tabhitmy/MLTF | WORKFLOW/code/python_code/sklearnTrainer.py | 1 | 12746 | # sklearnTrainer
import numpy
import numpy as np
import copy
from toolkitJ import cell2dmatlab_jsp
import matplotlib as mpl
from matplotlib.font_manager import FontProperties
zhfont = FontProperties(fname="/usr/share/fonts/cjkuni-ukai/ukai.ttc") # Chinese font for rendering Chinese text in figures
mpl.use('Agg')
import pprint
from sklearn.externals.six import StringIO
# import pydot
import sklearn.model_selection as skmdls
import sklearn.ensemble as skemb
import sklearn.tree as sktree
import sklearn.linear_model as sklinmdl
import sklearn.discriminant_analysis as skdisa
import sklearn.svm as sksvm
import sklearn.naive_bayes as sknb
import GVal
from controlPanelSubFunc_NFDA_J import dVM
from trainerSubFunc_NFDA_J import *
###################################
# Classifier Subfunction ################
###################################
def adaboost(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
weakClf_list = {
27: 'decisionTree',
271: 'decisionTree'
}
clf = skemb.AdaBoostClassifier(sktree.DecisionTreeClassifier(max_depth=2, min_samples_split=30, min_samples_leaf=5),
algorithm=dVM[2702][2], n_estimators=dVM[2700][2], learning_rate=dVM[2701][2], random_state=dVM[2703][2])
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def lda(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skdisa.LinearDiscriminantAnalysis(solver=dVM[2300][2], n_components=dVM[2303][2])
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def qda(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skdisa.QuadraticDiscriminantAnalysis()
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def naiveBayes(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clfname_list = {
25: sknb.GaussianNB,
251: sknb.GaussianNB,
252: sknb.MultinomialNB,
253: sknb.BernoulliNB,
}
clf = clfname_list[classifier_num]()
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def svmKernel(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
kernelname_list = {
22: 'rbf',
221: 'rbf',
222: 'poly',
223: 'sigmoid',
224: 'precompute'
}
kernelname = kernelname_list[classifier_num]
clf = sksvm.SVC(C=0.1, kernel=kernelname, degree=3, gamma=0.7)
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def svmLinear(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sksvm.LinearSVC(penalty=dVM[2100][2], loss=dVM[2101][2],
dual=dVM[2102][2], tol=dVM[2103][2], C=dVM[2104][2])
# clf = sksvm.LinearSVC()
clf.fit(X_tra, y_tra, sample_weight=weights)
cx = clf.coef_[0]
clfc = np.around(cx, decimals=2)
    print('### Feature coefficients under the configured penalty: ' + str(clfc))
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def linearRegression(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
# Not Applicable at this moment
# weights = Y_train_raw[:, 0]
# weights[np.nonzero(weights == 0)[0]] = 1
# weights = weights / 7
# y_tra, y_val, X_val = dataRegulation(y_tra, y_val, X_val, index_no)
# clf = sklinmdl.LinearRegression()
# clf.fit(X_tra, y_tra, sample_weight=weights)
# score = clf.score(X_tra, y_tra, sample_weight=weights)
# print()
# Z = clf.predict(X_val)
# print(Z.shape)
# TP = np.nonzero(np.logical_and(Z == 1, y_val == 1))[0]
# print(TP)
# print(TP.shape)
# print(max(weights))
# print(min(weights))
    # The commented-out draft above never defines clf, score or FRAP, so returning
    # them would raise a NameError; fail explicitly instead.
    raise NotImplementedError('linearRegression is not implemented yet')
##
def sgdClassifier(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sklinmdl.SGDClassifier(loss='hinge', penalty='l2', alpha=0.1)
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def logiRegression(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sklinmdl.LogisticRegression(penalty=dVM[3000][2], dual=dVM[3001][2], tol=dVM[3002][2],
C=dVM[3003][2], random_state=dVM[3007][2],
solver=dVM[3008][2], max_iter=dVM[3009][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def decisionTree(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = sktree.DecisionTreeClassifier(criterion=dVM[3100][2], splitter=dVM[3101][2],
max_depth=dVM[3102][2], min_samples_split=dVM[3103][2],
min_samples_leaf=dVM[3104][2], max_features=dVM[3106][2],
random_state=dVM[3107][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
path = GVal.getPARA('path_PARA')
with open(path['fig_path'] + 'dtclf.dot', 'w') as f:
f = sktree.export_graphviz(clf, out_file=f, class_names=['0', '1'])
# sktree.export_graphviz(clf, out_file=path['fig_path'] + 'tree.dot')
# exit()
return processLearning(clf, X_tra, y_tra, X_val, y_val)
##
def randomForest(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
# http://blog.csdn.net/xuxiatian/article/details/54410086
clf = skemb.RandomForestClassifier(n_estimators=dVM[3200][2],
criterion=dVM[3201][2], max_features=dVM[3202][2],
max_depth=dVM[3203][2], min_samples_split=dVM[3204][2],
min_samples_leaf=dVM[3205][2], min_weight_fraction_leaf=dVM[3206][2],
random_state=dVM[3213][2])
# GVal.show('dVM_PARA')
clf.fit(X_tra, y_tra, sample_weight=weights)
# print(clf.get_params())
# print(clf)
path = GVal.getPARA('path_PARA')
i_tree = 0
for tree_in_forest in clf.estimators_:
with open(path['fig_path'] + '/RF/tree_' + str(i_tree) + '.dot', 'w') as my_file:
my_file = sktree.export_graphviz(tree_in_forest, out_file=my_file, class_names=['0', '1'])
i_tree = i_tree + 1
return processLearning(clf, X_tra, y_tra, X_val, y_val)
def bagging(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skemb.BaggingClassifier(base_estimator=sktree.DecisionTreeClassifier(max_depth=2, min_samples_split=30, min_samples_leaf=5),
n_estimators=dVM[3300][2], max_samples=dVM[3301][2], max_features=dVM[3302][2],
bootstrap=dVM[3303][2], random_state=dVM[3308][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
def voting(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
#
classifier_list = GVal.getPARA('classifier_list_PARA')
# dVM[3400] = ['estimators', [21, 23, 25, 30, 31], [21, 23, 25, 30, 31]]
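    # Each code listed in dVM[3400][2] is trained through its family function (looked up
    # by the first two digits of the code) and the fitted estimator is then handed to the
    # VotingClassifier built below.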
estims = []
for i in range(len(dVM[3400][2])):
clf_temp = (classifier_list[dVM[3400][2][i]][1], classifier_list[int(str(dVM[3400][2][i])[0:2])][0](X_tra, y_tra, X_val, y_val, index_no, dVM[3400][2][i])[0])
estims.append(clf_temp)
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skemb.VotingClassifier(estimators=estims, voting=dVM[3401][2])
clf.fit(X_tra, y_tra)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
def gradboost(X_tra, y_tra, X_val, y_val, index_no, classifier_num):
y_tra, X_tra, y_val, X_val, weights = dataRegulationSKL(y_tra, X_tra, y_val, X_val, index_no)
clf = skemb.GradientBoostingClassifier(loss=dVM[3500][2], learning_rate=dVM[3501][2],
n_estimators=dVM[3502][2], max_depth=dVM[3503][2], criterion=dVM[3504][2],
min_samples_split=dVM[3505][2], min_samples_leaf=dVM[3506][2],
subsample=dVM[3508][2], random_state=dVM[3515][2])
clf.fit(X_tra, y_tra, sample_weight=weights)
return processLearning(clf, X_tra, y_tra, X_val, y_val)
###################################
# Main #############################
###################################
def sklearnTrainer(classifier_num, X_train_raw, Y_train_raw, X_valid_raw, Y_valid_raw, path):
feature_index = GVal.getPARA('feature_index_PARA')
X, y, X_valid, y_valid, index_no = dataSetPreparation(feature_index, X_train_raw, Y_train_raw, X_valid_raw, Y_valid_raw)
classifier_list = {
21: [svmLinear, 'Linear SVM', []],
22: [svmKernel, 'Kernel SVM (Default:rbf)'],
221: [svmKernel, 'Kernel SVM (rbf)'],
222: [svmKernel, 'Kernel SVM (poly)'],
223: [svmKernel, 'Kernel SVM (sigmoid)'],
224: [svmKernel, 'Kernel SVM (precompute)'],
23: [lda, 'LDA'],
24: [qda, 'QDA'],
25: [naiveBayes, 'Naive Bayes (Default: Gaussian)'],
251: [naiveBayes, 'Naive Bayes (Guassian)'],
252: [naiveBayes, 'Naive Bayes (Multinominal)'],
253: [naiveBayes, 'Naive Bayes (Bernoulli)'],
# 26: neuralNetwork,
27: [adaboost, 'Adaboost'],
271: [adaboost, 'Adaboost(WC:DecisionTree)'],
# 28: [linearRegression, 'Linear Regression'],
29: [sgdClassifier, 'SGD Classifier'],
30: [logiRegression, 'Logistic Regression'],
31: [decisionTree, 'Decision Tree'],
32: [randomForest, 'Random Forest'],
33: [bagging, 'bagging with DT'],
34: [voting, 'Voter'],
35: [gradboost, 'Gradient Tree Boosting']
}
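    # Two-digit codes select a classifier family; three-digit codes (e.g. 221, 251) are
    # variants of that family. Dispatch below uses int(str(classifier_num)[0:2]), so a
    # variant code is routed to its family function, which still receives the full code
    # to pick the concrete kernel or distribution.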
GVal.setPARA('classifier_list_cache', classifier_list)
# classifier serial code: [[model], [training score], [predicting rate]]
clf_cache = {
21: cell2dmatlab_jsp([1], 1, []),
22: cell2dmatlab_jsp([1], 1, []),
221: cell2dmatlab_jsp([1], 1, []),
222: cell2dmatlab_jsp([1], 1, []),
223: cell2dmatlab_jsp([1], 1, []),
224: cell2dmatlab_jsp([1], 1, []),
23: cell2dmatlab_jsp([1], 1, []),
24: cell2dmatlab_jsp([1], 1, []),
25: cell2dmatlab_jsp([1], 1, []),
251: cell2dmatlab_jsp([1], 1, []),
252: cell2dmatlab_jsp([1], 1, []),
253: cell2dmatlab_jsp([1], 1, []),
27: cell2dmatlab_jsp([1], 1, []),
271: cell2dmatlab_jsp([1], 1, []),
28: cell2dmatlab_jsp([1], 1, []),
29: cell2dmatlab_jsp([1], 1, []),
30: cell2dmatlab_jsp([1], 1, []),
31: cell2dmatlab_jsp([1], 1, []),
32: cell2dmatlab_jsp([1], 1, []),
33: cell2dmatlab_jsp([1], 1, []),
34: cell2dmatlab_jsp([1], 1, [])
}
print('### With model: [' + classifier_list[classifier_num][1] + ']')
# Loading model to do the classification
clf, score, FRAP = classifier_list[int(str(classifier_num)[0:2])][0](X, y, X_valid, y_valid, index_no, classifier_num)
clf_cache[classifier_num] = clf
# return clf,score,FRAP
clf_info = cell2dmatlab_jsp([3], 1, [])
clf_info[0] = classifier_num
clf_info[1] = classifier_list[classifier_num][1]
clf_info[2] = clf
return clf_info, score, FRAP
| mit |
aranega/pyecore | experimental/m2m/transfo_example.py | 2 | 2226 | import motra
# generated using
# https://github.com/kolovos/datasets/blob/master/github-mde/ghmde.ecore
# as input metamodel
import ghmde
from pyecore.ecore import *
# Define a graph-like metamodel in a static way
eClass = EPackage('graph', nsURI='http://graph/1.0', nsPrefix='graph')
@EMetaclass
class Node(object):
name = EAttribute(eType=EString)
@EMetaclass
class Graph(object):
name = EAttribute(eType=EString)
nodes = EReference(eType=Node, upper=-1, containment=True)
# Transfo definition
ghmde2graph = motra.Transformation('ghmde2graph',
inputs=['ghmde_model'],
outputs=['graph_model'])
@ghmde2graph.main
def main(ghmde_model=None, graph_model=None):
print('Transforming repository to graph', graph_model)
for repository in motra.objects_of_kind(ghmde_model, ghmde.File):
file2node(repository)
for repository in motra.objects_of_kind(ghmde_model, ghmde.Repository):
repository2graph(repository, postfix='_graph')
def does_not_starts_with(self, postfix):
return not self.name.startswith(postfix)
@ghmde2graph.mapping(when=does_not_starts_with)
def repository2graph(self: ghmde.Repository, postfix: str) -> Graph:
result.name = self.name + postfix
for repo_file in self.files:
result.nodes.append(file2node(repo_file))
@ghmde2graph.mapping
def file2node(self: ghmde.File) -> Node:
result.name = self.path
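# Note: inside motra @mapping functions, `result` is not defined locally; the decorator
# is expected to create an instance of the annotated return type and inject it (and to
# cache mappings, so repeated calls like file2node() reuse already-created nodes).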
# @transfo.main
# def main(inputs, outputs):
# print('in main')
# print(inputs.ghmde.contents)
# for o in motra.objects_of_kind(inputs.ghmde, ghmde.Repository):
# test_dispatch(o)
#
#
# @transfo.mapping(when=lambda self: self.name is not None)
# def test1(self: ghmde.Repository) -> ghmde.Repository:
# print('changing name', result is self, self.name)
# result.name = self.name
# self.name += '_toto'
#
#
# @transfo.mapping(output_model='test2',
# when=lambda self: self.name is None)
# def test2(self: ghmde.Repository) -> ghmde.Repository:
# result.name = 'from_empty_' + str(self)
#
#
# @transfo.disjunct(mappings=[test1, test2])
# def test_dispatch(self: ghmde.Repository) -> ghmde.Repository:
# pass
| bsd-3-clause |
vdumoulin/fuel | fuel/converters/cifar100.py | 18 | 3576 | import os
import tarfile
import h5py
import numpy
import six
from six.moves import cPickle
from fuel.converters.base import fill_hdf5_file, check_exists
DISTRIBUTION_FILE = 'cifar-100-python.tar.gz'
@check_exists(required_files=[DISTRIBUTION_FILE])
def convert_cifar100(directory, output_directory,
output_filename='cifar100.hdf5'):
"""Converts the CIFAR-100 dataset to HDF5.
Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
'cifar100.hdf5'.
This method assumes the existence of the following file:
`cifar-100-python.tar.gz`
Parameters
----------
directory : str
Directory in which the required input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar100.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode="w")
input_file = os.path.join(directory, 'cifar-100-python.tar.gz')
tar_file = tarfile.open(input_file, 'r:gz')
file = tar_file.extractfile('cifar-100-python/train')
try:
if six.PY3:
train = cPickle.load(file, encoding='latin1')
else:
train = cPickle.load(file)
finally:
file.close()
train_features = train['data'].reshape(train['data'].shape[0],
3, 32, 32)
train_coarse_labels = numpy.array(train['coarse_labels'],
dtype=numpy.uint8)
train_fine_labels = numpy.array(train['fine_labels'],
dtype=numpy.uint8)
file = tar_file.extractfile('cifar-100-python/test')
try:
if six.PY3:
test = cPickle.load(file, encoding='latin1')
else:
test = cPickle.load(file)
finally:
file.close()
test_features = test['data'].reshape(test['data'].shape[0],
3, 32, 32)
test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)
test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)
data = (('train', 'features', train_features),
('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),
('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),
('test', 'features', test_features),
('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),
('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['coarse_labels'].dims[0].label = 'batch'
h5file['coarse_labels'].dims[1].label = 'index'
h5file['fine_labels'].dims[0].label = 'batch'
h5file['fine_labels'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
def fill_subparser(subparser):
"""Sets up a subparser to convert the CIFAR100 dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `cifar100` command.
"""
return convert_cifar100
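# This subparser is wired into Fuel's `fuel-convert` command-line tool (assumption based
# on the converter API), i.e. roughly: fuel-convert cifar100 ...  The exact flags depend
# on the installed Fuel version.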
| mit |
agnusfeec/tattCBIR | lib_sistema.py | 1 | 25313 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 14 13:36:05 2016
@author: agnus
"""
#%%
def monta_lista_imagens(path = '.', ext='.png'):
import os
imagens = {}
for dirname, dirnames, filenames in os.walk(path):
        # collect the full path of every file with the requested extension
for filename in filenames:
fname_path = os.path.join(dirname, filename)
fext = os.path.splitext(fname_path)[1]
if fext == ext:
#file_dat = [filename, dirname]
#imagens.append(file_dat)
imagens[filename]=dirname
else:
continue
return imagens
#%%
def grava_db_imagens(arquivo, imagens):
#arquivo = './tatt_c.db'
with open(arquivo, 'wb') as db_image_file:
for nome_img, caminho in imagens.items():
db_image_file.write(nome_img+ '\t' + caminho + '\n')
db_image_file.close()
#%%
def grava_config(arquivo = './example_mem.cfg'):
import ConfigParser
config = ConfigParser.RawConfigParser()
# When adding sections or items, add them in the reverse order of
# how you want them to be displayed in the actual file.
# In addition, please note that using RawConfigParser's and the raw
# mode of ConfigParser's respective set functions, you can assign
# non-string values to keys internally, but will receive an error
# when attempting to write to a file or when you get it in non-raw
# mode. SafeConfigParser does not allow such assignments to take place.
config.add_section('Geral')
config.set('Geral', 'Image Database', 'Tatt-C')
config.set('Geral', 'Database Image Folder', '/media/sf_Projeto/dataset/tatt_dca/')
config.set('Geral', 'Indexa image database', 'True')
config.set('Geral', 'Database filename', './tatt_c.db')
config.set('Geral', 'Image filename extension','.jpg')
config.set('Geral', 'Training File', 'train1')
config.set('Geral', 'Testing File', 'test1')
config.add_section('Folds')
config.set('Folds', 'Folds Folder', '/media/sf_Projeto/dataset/tatt_dca/folds/')
config.set('Folds', 'Quantidade subsets', '3')
config.set('Folds', 'Subset_1', 'gallery{1}.txt')
config.set('Folds', 'Subset_2', 'probes{1}.txt')
config.set('Folds', 'Subset_3', 'bg{1}.txt')
config.set('Folds', 'Ground_truth', 'ground_truth.txt')
config.add_section('SIFT')
config.set('SIFT','SIFT Folder', '/media/sf_Projeto/dataset/tatt_dca/SIFT/')
# Writing our configuration file to 'example.cfg'
with open(arquivo, 'wb') as configfile:
config.write(configfile)
#%%
def folds_construct(subsets, folds_folder):
n_folds =len(subsets[0])
n_subsets = len(subsets)
folds = []
for i in range(n_folds):
sub = []
for j in range(n_subsets):
arquivo = subsets[j][i]
aux = []
with open(folds_folder+arquivo, 'r') as imagefiles:
for nomef in imagefiles:
if nomef[-1] == '\n' : nomef = nomef[:-1]
aux.append(nomef)
imagefiles.close()
sub.append(aux)
folds.append(sub)
return folds
#%%
def le_config():
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('./example_mem.cfg')
# getfloat() raises an exception if the value is not a float
# getint() and getboolean() also do this for their respective types
base = config.get('Geral', 'image database')
indexa = config.getboolean('Geral', 'indexa image database')
print base
if indexa:
        print "indexing image database"
arquivo = config.get('Geral','database filename')
caminho = config.get('Geral', 'database image folder')
extensao = config.get('Geral', 'image filename extension')
print arquivo, caminho, extensao
imagens = monta_lista_imagens(caminho, extensao)
grava_db_imagens(arquivo, imagens)
folds_folder = config.get('Folds','folds folder')
n_subsets = config.getint('Folds', 'quantidade subsets')
subsets=[]
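    # Subset entries are stored as templates such as 'gallery{1}.txt'; the indices listed
    # inside the braces are expanded below into one concrete filename per fold.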
for i in range(n_subsets):
sub = config.get('Folds', 'subset_'+str(i+1))
ps = sub.find("{")
pe = sub.find("}")
ped = sub[ps+1:pe]
indices = ped.split(',')
aux = []
for ind in indices:
            aux.append(sub[:ps]+ind+'.txt')  # TODO: make the file extension configurable
subsets.append(aux)
#print subsets
#n_folds = config.getint('Folds', 'quantidade folds')
n_folds =len(subsets[0])
folds = []
for i in range(n_folds):
sub = []
for j in range(n_subsets):
arquivo = subsets[j][i]
aux = []
with open(folds_folder+arquivo, 'r') as imagefiles:
for nomef in imagefiles:
if nomef[-1] == '\n' : nomef = nomef[:-1]
aux.append(nomef)
imagefiles.close()
sub.append(aux)
folds.append(sub)
#print folds[0]
gt_filename = config.get('Folds', 'ground_truth')
sift_folder = config.get('SIFT', 'sift folder')
print sift_folder, folds_folder, caminho
return (folds, imagens, gt_filename, sift_folder, folds_folder, caminho, subsets)
#%%
def sift(nomes_imagens, imagens, sift_folder):
import cv2
import os
from math import sqrt
#ds = []
#kp = []
t = len(nomes_imagens)
i=1
for filename in nomes_imagens:
fname = os.path.join(sift_folder, filename[:-3]+'sift_ds')
if os.path.isfile(fname) == False :
print filename
#file_img = os.path.join(diretorio, filename)
diretorio = imagens[filename]
img = cv2.imread(os.path.join(diretorio, filename)) #file_img)
            # Resize the image (used when computing Fisher Vectors)
#img = cv2.resize(img, (256,256))
aux = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(aux)
k = sqrt((240.0*480.0*0.5)/(gray.shape[0]*gray.shape[1]))
res = cv2.resize(gray,None,fx=k, fy=k, interpolation = cv2.INTER_CUBIC)
cv2.imwrite("/media/sf_Projeto/dataset/tatt_dca//img_Reduzido/"+filename,res)
sift = cv2.xfeatures2d.SIFT_create()
(kps, descs) = sift.detectAndCompute(res, None)
#ds.append(descs)
#kp.append(kps)
arquivo = os.path.join(sift_folder, filename[:-3]+'sift_ds')
with open(arquivo, 'wb') as sift_file:
for desc in descs:
sift_file.write(','.join(str(x) for x in desc)+'\n')
sift_file.close()
arquivo = os.path.join(sift_folder, filename[:-3]+'sift_kp')
with open(arquivo, 'wb') as sift_file:
for point in kps:
temp = [point.pt[0], point.pt[1], point.size, point.angle,
point.response, point.octave, point.class_id]
sift_file.write(','.join(str(x) for x in temp)+'\n')
sift_file.close()
print (i*100)/t,
i=i+1
#return ds
#%%
def sift_match(ds1, kp1, ds2, kp2):
import cv2
MIN_MATCH_COUNT = 10
bf = cv2.BFMatcher()
matches = bf.knnMatch(ds1,ds2, k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
qm = len(good)
(nr1,c) = ds1.shape
(nr2,c) = ds2.shape
# if qm>MIN_MATCH_COUNT:
# src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
# dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
#
# M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
# if mask != None:
# matchesMask = mask.ravel().tolist()
# rt = np.sum(np.asarray(matchesMask))
# else:
# rt = 0
# else:
# #print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
# #matchesMask = None
# rt = 0
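    # Score: number of ratio-test matches as a percentage of the larger of the two
    # descriptor sets, so higher means more similar. Callers later sort gallery
    # candidates by this value in descending order.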
nr = nr1
if nr2>nr:
nr = nr2
rt = (100.0*qm/nr)
# if qm > 0:
# rt = 1.0/qm
# else:
# rt = 10^8
return rt
#%%
def gera_sift_base(folds, imagens, sift_folder):
    # First, generate (if necessary) the SIFT descriptors for the training and test
    # images. This could be optimized by generating them for the whole database, but
    # if the entire database is used the cost may be high, since it also contains
    # images belonging to other use cases.
n_folds = len(folds)
    # This could be implemented differently: the lines below only aggregate the image
    # names so that the SIFT descriptors are generated for each one of the folds.
for i in range(n_folds):
test = folds[i][1]
train = folds[i][0]
bg = folds[i][2]
for j in range(n_folds):
if j!=i :
train = train + folds[j][0]+folds[j][1]+folds[j][2]
        print 'Generating SIFT for the training set'
#train_kp, train_ds = sift(train, imagens, sift_folder)
sift(train, imagens, sift_folder)
        print 'Generating SIFT for the test set'
#test_kp, test_ds = sift(test, imagens)
sift(test, imagens, sift_folder)
        print 'Generating SIFT for the background (bg) set'
#bg_kp, bg_ds = sift(bg, imagens)
sift(bg, imagens, sift_folder)
#%%
def processa_sift(folds, imagens, sift_folder):
import numpy as np
import os
import cv2
n_folds = len(folds)
    # Changed so that the gallery images of fold i are included in the train set, so
    # that the images corresponding to each probe are present in the gallery (train).
for i in range(n_folds):
test = folds[i][1]
bg = folds[i][2]
train = folds[i][0]#+bg
for j in range(n_folds):
if j!=i :
train = train + folds[j][0]+folds[j][1]+folds[j][2]
n_test = len(test)
n_train = len(train)
dist = np.zeros((n_train), dtype=np.float)
nn = n_test * n_train
        print 'Matching the test set against the training set'
mem = True
if mem==True :
ds=[]
ks=[]
arquivo = './clist_mem_'+str(i+1)+'.txt'
with open(arquivo, 'w') as clist_file:
l = 0
for file_test in test:
fname = os.path.join(sift_folder, file_test[:-3]+'sift_ds')
ds1 = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) #,skiprows=1)
fname = os.path.join(sift_folder, file_test[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
kp1=[]
kp2=[]
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
kp1.append(kpoint)
diretorio = imagens[file_test]
img1 = cv2.imread(os.path.join(diretorio, file_test),0)
#print os.path.join(diretorio, file_test)
j = 0
for file_train in train:
diretorio = imagens[file_train]
img2 = cv2.imread(os.path.join(diretorio, file_train),0)
#print os.path.join(diretorio, file_train)
if (mem == True and len(ds)<len(train)):
fname = os.path.join(sift_folder, file_train[:-3]+'sift_ds')
ds.append ( np.asarray((np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8)) ) #,skiprows=1)
ds2 = ds[j]
fname = os.path.join(sift_folder, file_train[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
aux =[]
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
aux.append(kpoint)
ks.append(aux)
kp2 = ks[j]
elif (mem == True and len(ds)==len(train)):
ds2 = ds[j]
kp2 = ks[j]
elif mem == False:
fname = os.path.join(sift_folder, file_train[:-3]+'sift_ds')
ds2 = ( (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) )
fname = os.path.join(sift_folder, file_train[:-3]+'sift_kp')
kps = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.float) #,skiprows=1)
kp2 = []
for kp in kps:
kpoint = cv2.KeyPoint(float(kp[0]), float(kp[1]),
float(kp[2]), float(kp[3]),
float(kp[4]), int(kp[5]), int(kp[6]))
kp2.append(kpoint)
#print ds1
#print ds2
rt = sift_match(ds1, np.asarray(kp1), ds2, np.asarray(kp2))
dist[j] = rt
j = j + 1
print i,(((l*n_train)+j)*100)/nn,
indice = np.argsort(dist)[::-1]
k = 1
for id in indice:
clist_file.write(file_test+'|'+ str(k) + '|' + train[id] + '|' + str(dist[id]) +'\n')
k = k + 1
l = l + 1
clist_file.close()
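        # NOTE: only the first fold is processed; remove this break to run all folds.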
break
#%%
def ground_truth(folds_folder, gt_filename):
"""Reads a ground truth table from text file.
Keyword arguments:
folds_folder -- the path for the ground truth file
gt_filename -- the file name of the ground truth file with extension
Returns:
gt_images -- ground truth table stored in a dictionary
"""
#folds_folder = '/media/sf_Projeto/dataset/tatt-c_update_v1.4/5-fold/tattoo_identification/'
#gt_filename = 'ground_truth.txt'
gt_imagens = {}
with open(folds_folder+gt_filename, 'r') as gt_arq:
for nomef in gt_arq:
imgs = nomef.split('|')
if imgs[1][-1] == '\n' : imgs[1] = imgs[1][:-1]
#print imgs[0], imgs[1]
gt_imagens[imgs[0]] = imgs[1]
gt_arq.close()
return gt_imagens
#%%
def compute_cmc(arquivo, gt_imagens):
"""Reads a classification list from text file and sumarize rank results for
every image reference based in the ground truth dictionary.
Keyword arguments:
arquivo -- the filename of classification list file
gt_images -- ground truth table stored in a dictionary
Returns:
cmc -- acummulated accuracy for each rank stored in a numpy array
"""
import numpy as np
i = 0
acc = np.zeros(400)
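    # Per-rank hit counter; the fixed size of 400 assumes no rank beyond 399 appears
    # in the candidate lists.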
#arquivo = './clist_mem_'+str(i+1)+'.txt'
with open(arquivo, 'r') as clist_file:
for nomef in clist_file:
imgs = nomef.split('|')
if imgs[3][-1] == '\n' : imgs[3] = imgs[3][:-1]
if gt_imagens[imgs[0]] == imgs[2] :
r = int(imgs[1])
acc[r] = acc[r]+1
clist_file.close()
#print cmc
ft = sum(acc)
#print cmc/ft
cmc = np.zeros(400)
for i in range(1,400):
cmc[i] = cmc[i-1]+acc[i]/ft
#print cmc1
return cmc
#%%
def plot_cmc(cmc, ni=200):
import matplotlib.pyplot as plt
import pylab as P
import numpy as np
fig = P.figure()
fig.suptitle('Acumulative Match Characteristic', fontsize=18, fontweight='bold')
P.ylabel('%', fontsize=16)
P.xlabel('Rank', fontsize=16)
P.xlim(0, ni)
P.ylim(0,101)
P.xticks(np.arange(0, ni, 10.0))
P.yticks(np.arange(0, 101, 5.0))
xticklabels = P.getp(P.gca(), 'xticklabels')
yticklabels = P.getp(P.gca(), 'yticklabels')
P.setp(yticklabels, 'color', 'k', fontsize='x-large')
P.setp(xticklabels, 'color', 'k', fontsize='x-large')
P.grid(True)
fig.set_size_inches(19,7)
#P.plot(cmc*100)
P.plot(cmc*100)
fig.savefig('cmc_bf_knn.png')
P.show()
#%%%
#Author: Jacob Gildenblat, 2014
#http://jacobcv.blogspot.com.br/2014/12/fisher-vector-in-python.html
#License: you may use this for whatever you like
#Adaptation: Agnus A. Horta
def fv_dictionary(descriptors, N):
import numpy as np
import cv2
em = cv2.ml.EM_create()
em.setClustersNumber(N)
#em = cv2.EM(N)
em.trainEM(descriptors)
return np.float32(em.getMeans()), \
np.float32(em.getCovs()), np.float32(em.getWeights())[0]
def fv_generate_gmm(descriptors, N, dt):
import numpy as np
words = np.concatenate(descriptors)
#np.concatenate([folder_descriptors(folder) for folder in glob.glob(input_folder + '*')])
#print("Training GMM of size", N)
means, covs, weights = fv_dictionary(words, N)
#Throw away gaussians with weights that are too small:
th = 1.0 / N
means = np.float32([m for k,m in zip(range(0, len(weights)), means) if weights[k] > th])
covs = np.float32([m for k,m in zip(range(0, len(weights)), covs) if weights[k] > th])
weights = np.float32([m for k,m in zip(range(0, len(weights)), weights) if weights[k] > th])
#print 'Means: ',means
#print 'Covs: ',covs
#print 'Weights: ',weights
np.save("./dat/means" + dt + ".gmm", means)
np.save("./dat/covs" + dt + ".gmm", covs)
np.save("./dat/weights" + dt + ".gmm", weights)
return means, covs, weights
def fv_load_gmm(dt, folder = "./dat"):
import numpy as np
files = ["means" + dt + ".gmm" +".npy", "covs" + dt + ".gmm.npy", "weights" + dt + ".gmm.npy"]
try:
return map(lambda file: np.load(file), map(lambda s : folder + "/" + s , files))
except IOError:
return (None, None, None)
def fv_likelihood_moment(x, ytk, moment):
import numpy as np
x_moment = np.power(np.float32(x), moment) if moment > 0 else np.float32([1])
return x_moment * ytk
def fv_likelihood_statistics(samples, means, covs, weights):
from scipy.stats import multivariate_normal
import numpy as np
gaussians, s0, s1,s2 = {}, {}, {}, {}
samples = zip(range(0, len(samples)), samples)
#print samples
g = [multivariate_normal(mean=means[k], cov=covs[k]) for k in range(0, len(weights)) ]
for index, x in samples:
gaussians[index] = np.array([g_k.pdf(x) for g_k in g])
for k in range(0, len(weights)):
s0[k], s1[k], s2[k] = 0, 0, 0
for index, x in samples:
probabilities = np.multiply(gaussians[index], weights)
probabilities = probabilities / np.sum(probabilities)
s0[k] = s0[k] + fv_likelihood_moment(x, probabilities[k], 0)
s1[k] = s1[k] + fv_likelihood_moment(x, probabilities[k], 1)
s2[k] = s2[k] + fv_likelihood_moment(x, probabilities[k], 2)
return s0, s1, s2
def fv_fisher_vector_weights(s0, s1, s2, means, covs, w, T):
import numpy as np
return np.float32([((s0[k] - T * w[k]) / np.sqrt(w[k]) ) for k in range(0, len(w))])
def fv_fisher_vector_means(s0, s1, s2, means, sigma, w, T):
import numpy as np
return np.float32([(s1[k] - means[k] * s0[k]) / (np.sqrt(w[k] * sigma[k])) for k in range(0, len(w))])
def fv_fisher_vector_sigma(s0, s1, s2, means, sigma, w, T):
import numpy as np
return np.float32([(s2[k] - 2 * means[k]*s1[k] + (means[k]*means[k] - sigma[k]) * s0[k]) / (np.sqrt(2*w[k])*sigma[k]) for k in range(0, len(w))])
def fv_normalize(fisher_vector):
import numpy as np
v = np.sqrt(abs(fisher_vector)) * np.sign(fisher_vector)
return v / np.sqrt(np.dot(v, v))
def fv_fisher_vector(samples, means, covs, w):
import numpy as np
#print 'fisher_vector(samples, means, covs, w)'
s0, s1, s2 = fv_likelihood_statistics(samples, means, covs, w)
T = samples.shape[0]
covs = np.float32([np.diagonal(covs[k]) for k in range(0, covs.shape[0])])
a = fv_fisher_vector_weights(s0, s1, s2, means, covs, w, T)
b = fv_fisher_vector_means(s0, s1, s2, means, covs, w, T)
c = fv_fisher_vector_sigma(s0, s1, s2, means, covs, w, T)
fv = np.concatenate([np.concatenate(a), np.concatenate(b), np.concatenate(c)])
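    # fv concatenates K weight terms, K*D mean terms and K*D covariance terms, giving
    # K*(2*D + 1) components (K = Gaussians kept in the GMM, D = descriptor size,
    # 128 for SIFT), before the signed-sqrt and L2 normalization applied below.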
fv = fv_normalize(fv)
#print 'fv = ', fv
return fv
def le_descritores(sift_folder, subset, tipo=1):
import os
import numpy as np
#n_folds = len(folds)
    # Changed so that the gallery images of fold i are included in the train set, so
    # that the images corresponding to each probe are present in the gallery (train).
# for i in range(n_folds):
# train = folds[i][0]
# for j in range(n_folds):
# if j!=i :
# train = train + folds[j][0]+folds[j][1]+folds[j][2]
#
# n_train = len(train)
ch = 0
ds = []
id_ds = []
for image in subset:
fname = os.path.join(sift_folder, image[:-3]+'sift_ds')
ds1 = (np.loadtxt(open(fname,"r"),delimiter=",")).astype(np.uint8) #,skiprows=1)
if tipo == 1:
if ch == 0:
ch = 1
ds = []
ds.append(ds1)
id_ds.append(ds1.shape[0])
else:
ds.append(ds1)
id_ds.append(ds1.shape[0])
else:
if ch == 0:
ch = 1
ds = np.empty_like(ds1)
ds[:] = ds1
id_ds.append(ds1.shape[0])
else:
print ds.shape, ds1.shape
ds = np.concatenate((ds, ds1), axis=0)
id_ds.append(ds1.shape[0])
return ds, id_ds
#%%
def bov_histogramas_grava(arquivo, hists, dt):
resultFile = open(arquivo, 'w')
i = len(hists)
for h in hists:
line = (''.join(str(e) + ", " for e in h.tolist()))[:-2]
resultFile.write(line)
if i > 0:
resultFile.write("\n")
i = i - 1
resultFile.close()
#%%
def bov_codebook_gera(l_sift, nc, tipo):
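    # Builds a visual-word codebook with nc centers from the stacked SIFT descriptors:
    #   tipo == 1 -> exact KMeans (slower, better centroids)
    #   tipo == 2 -> MiniBatchKMeans (faster approximation)
    #   otherwise -> nc descriptors sampled at random as centroids, assignment via vq
    # Returns (centers, labels), where labels[i] is the codeword index of descriptor i.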
if tipo == 1:
# http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans.fit
from sklearn.cluster import KMeans
est = KMeans(n_clusters=nc, init='k-means++', n_init=10, max_iter=100,
tol=0.0001, precompute_distances='auto', verbose=0,
random_state=None, copy_x=True, n_jobs=4)
est.fit(l_sift)
labels = est.labels_
centers = est.cluster_centers_
elif tipo == 2:
from sklearn.cluster import MiniBatchKMeans
est = MiniBatchKMeans(n_clusters=nc, init='k-means++', max_iter=100,
batch_size=3*nc, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01)
est.fit(l_sift)
labels = est.labels_
centers = est.cluster_centers_
else:
import random
from scipy.cluster.vq import vq
import numpy as np
list_of_random_items = random.sample(np.arange(l_sift.shape[0]), nc)
l_centroids = []
for i in list_of_random_items:
l_centroids.append(l_sift[i])
centers = np.asarray(l_centroids)
labels, _ = vq(l_sift, centers)
return (centers, labels)
#%%
def bov_histogramas_gera(labels, id_ds, k, nomes_imagens, vis=False):
from matplotlib import pyplot as plt
import numpy as np
#fv = np.vectorize(f)
hists = []
i = 0
for j in range(len(nomes_imagens)):
#ld = X[indices[j]].tolist()
n = id_ds[j]
sl = labels[i:i+n]
hist, bins = np.histogram(sl, bins=k, range=(0, k), normed=False,
weights=None, density=True)
if vis == True:
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.title("Histogram "+nomes_imagens[j])
plt.xlabel("Visual Word")
plt.ylabel("Frequency")
plt.bar(center, hist, align='center', width=width)
plt.show()
#print j
hists.append(hist)
#print hist
i = i + n
#j = j +1
return hists
def bov_descritores_codifica(X, centers):
from scipy.cluster.vq import vq
labels,_ = vq(X,centers)
return labels
| gpl-3.0 |
aalmah/pylearn2 | pylearn2/devtools/tests/test_format.py | 24 | 25785 | """
Unit tests for format checking
"""
from __future__ import print_function
from nose.plugins.skip import SkipTest
import os
import pylearn2
from pylearn2.devtools.tests.docscrape import docstring_errors
from pylearn2.devtools.list_files import list_files
from pylearn2.devtools.tests.pep8.pep8 import StyleGuide
whitelist_pep8 = [
"rbm_tools.py",
"distributions/mnd.py",
"models/sparse_autoencoder.py",
"models/tests/test_dbm.py",
"models/tests/test_s3c_inference.py",
"models/tests/test_mnd.py",
"models/tests/test_s3c_misc.py",
"models/gsn.py",
"models/dbm/layer.py",
"models/dbm/__init__.py",
"models/dbm/ising.py",
"models/differentiable_sparse_coding.py",
"models/local_coordinate_coding.py",
"models/mnd.py",
"models/s3c.py",
"tests/test_monitor.py",
"kmeans.py",
"packaged_dependencies/theano_linear/conv2d.py",
"packaged_dependencies/theano_linear/imaging.py",
"packaged_dependencies/theano_linear/pyramid.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"test_gpu_unshared_conv.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"test_localdot.py",
"packaged_dependencies/theano_linear/unshared_conv/localdot.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"unshared_conv.py",
"packaged_dependencies/theano_linear/linear.py",
"packaged_dependencies/theano_linear/test_spconv.py",
"packaged_dependencies/theano_linear/test_matrixmul.py",
"packaged_dependencies/theano_linear/spconv.py",
"expr/tests/test_coding.py",
"expr/tests/test_normalize.py",
"expr/tests/test_stochastic_pool.py",
"expr/stochastic_pool.py",
"expr/sampling.py",
"expr/information_theory.py",
"expr/basic.py",
"gui/graph_2D.py",
"sandbox/cuda_convnet/weight_acts.py",
"sandbox/cuda_convnet/filter_acts.py",
"sandbox/cuda_convnet/tests/test_filter_acts_strided.py",
"sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py",
"sandbox/cuda_convnet/tests/test_filter_acts.py",
"sandbox/cuda_convnet/tests/test_weight_acts_strided.py",
"sandbox/cuda_convnet/tests/test_image_acts_strided.py",
"sandbox/cuda_convnet/tests/test_img_acts.py",
"sandbox/cuda_convnet/tests/test_stochastic_pool.py",
"sandbox/cuda_convnet/specialized_bench.py",
"sandbox/cuda_convnet/response_norm.py",
"sandbox/cuda_convnet/__init__.py",
"sandbox/cuda_convnet/img_acts.py",
"sandbox/cuda_convnet/convnet_compile.py",
"sandbox/cuda_convnet/pthreads.py",
"sandbox/cuda_convnet/pool.py",
"sandbox/cuda_convnet/bench.py",
"sandbox/cuda_convnet/stochastic_pool.py",
"sandbox/cuda_convnet/probabilistic_max_pooling.py",
"sandbox/tuple_var.py",
"sandbox/lisa_rl/bandit/average_agent.py",
"sandbox/lisa_rl/bandit/classifier_bandit.py",
"sandbox/lisa_rl/bandit/classifier_agent.py",
"sandbox/lisa_rl/bandit/plot_reward.py",
"config/old_config.py",
"utils/utlc.py",
"utils/tests/test_serial.py",
"utils/common_strings.py",
"utils/mem.py",
"dataset_get/dataset-get.py",
"dataset_get/helper-scripts/make-archive.py",
"dataset_get/dataset_resolver.py",
"optimization/minres.py",
"linear/conv2d.py",
"linear/local_c01b.py",
"linear/linear_transform.py",
"linear/conv2d_c01b.py",
"energy_functions/rbm_energy.py",
"scripts/pkl_inspector.py",
"scripts/show_binocular_greyscale_examples.py",
"scripts/jobman/tester.py",
"scripts/dbm/dbm_metrics.py",
"scripts/papers/maxout/svhn_preprocessing.py",
"scripts/papers/jia_huang_wkshp_11/fit_final_model.py",
"scripts/papers/jia_huang_wkshp_11/evaluate.py",
"scripts/papers/jia_huang_wkshp_11/extract_features.py",
"scripts/papers/jia_huang_wkshp_11/assemble.py",
"scripts/gpu_pkl_to_cpu_pkl.py",
"scripts/gsn_example.py",
"scripts/tutorials/deep_trainer/run_deep_trainer.py",
"scripts/tutorials/grbm_smd/test_grbm_smd.py",
"scripts/icml_2013_wrepl/multimodal/"
"extract_layer_2_kmeans_features.py",
"scripts/icml_2013_wrepl/multimodal/make_submission.py",
"scripts/icml_2013_wrepl/multimodal/lcn.py",
"scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py",
"scripts/icml_2013_wrepl/emotions/emotions_dataset.py",
"scripts/icml_2013_wrepl/emotions/make_submission.py",
"scripts/icml_2013_wrepl/black_box/black_box_dataset.py",
"scripts/icml_2013_wrepl/black_box/make_submission.py",
"scripts/diff_monitor.py",
"corruption.py",
"sandbox/lisa_rl/bandit/gaussian_bandit.py",
"utils/track_version.py",
"scripts/get_version.py",
"training_algorithms/tests/test_bgd.py",
"training_algorithms/tests/test_default.py",
"training_algorithms/default.py",
"training_algorithms/training_algorithm.py",
"distributions/tests/test_mnd.py",
"distributions/parzen.py",
"distributions/uniform_hypersphere.py",
"models/setup.py",
"models/independent_multiclass_logistic.py",
"models/softmax_regression.py",
"models/tests/test_reflection_clip.py",
"models/tests/test_maxout.py",
"models/tests/test_convelemwise_sigm.py",
"models/dbm/sampling_procedure.py",
"models/rbm.py",
"models/pca.py",
"tests/test_train.py",
"packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py",
"packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py",
"packaged_dependencies/theano_linear/linearmixin.py",
"packaged_dependencies/theano_linear/util.py",
"packaged_dependencies/theano_linear/__init__.py",
"packaged_dependencies/theano_linear/test_linear.py",
"expr/tests/test_nnet.py",
"expr/image.py",
"expr/coding.py",
"expr/normalize.py",
"expr/probabilistic_max_pooling.py",
"testing/tests/test.py",
"testing/skip.py",
"testing/prereqs.py",
"testing/__init__.py",
"gui/get_weights_report.py",
"gui/patch_viewer.py",
"sandbox/cuda_convnet/tests/test_response_norm.py",
"sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py",
"sandbox/cuda_convnet/tests/test_rop_pool.py",
"sandbox/cuda_convnet/tests/test_pool.py",
"sandbox/cuda_convnet/tests/test_common.py",
"sandbox/cuda_convnet/shared_code.py",
"sandbox/cuda_convnet/code_templates.py",
"sandbox/lisa_rl/bandit/agent.py",
"sandbox/lisa_rl/bandit/algorithm.py",
"sandbox/lisa_rl/bandit/environment.py",
"sandbox/lisa_rl/__init__.py",
"datasets/avicenna.py",
"datasets/iris.py",
"datasets/adult.py",
"datasets/npy_npz.py",
"datasets/control.py",
"datasets/cifar100.py",
"datasets/transformer_dataset.py",
"termination_criteria/__init__.py",
"__init__.py",
"utils/logger.py",
"utils/tests/test_mnist_ubyte.py",
"utils/tests/test_data_specs.py",
"utils/tests/test_bit_strings.py",
"utils/tests/test_iteration.py",
"utils/theano_graph.py",
"utils/__init__.py",
"utils/datasets.py",
"utils/data_specs.py",
"utils/insert_along_axis.py",
"utils/environ.py",
"utils/call_check.py",
"utils/python26.py",
"deprecated/classifier.py",
"train.py",
"classifier.py",
"dataset_get/helper-scripts/make-sources.py",
"pca.py",
"optimization/test_linesearch.py",
"optimization/test_minres.py",
"optimization/test_batch_gradient_descent.py",
"optimization/linear_cg.py",
"optimization/test_feature_sign.py",
"optimization/feature_sign.py",
"optimization/test_linear_cg.py",
"optimization/linesearch.py",
"linear/tests/test_conv2d.py",
"linear/tests/test_conv2d_c01b.py",
"linear/matrixmul.py",
"energy_functions/energy_function.py",
"scripts/make_weights_image.py",
"scripts/plot_monitor.py",
"scripts/print_monitor.py",
"scripts/num_parameters.py",
"scripts/benchmark/time_relu.py",
"scripts/jobman/experiment.py",
"scripts/jobman/__init__.py",
"scripts/dbm/show_negative_chains.py",
"scripts/papers/maxout/compute_test_err.py",
"scripts/papers/jia_huang_wkshp_11/npy2mat.py",
"scripts/datasets/step_through_small_norb.py",
"scripts/datasets/step_through_norb_foveated.py",
"scripts/datasets/make_downsampled_stl10.py",
"scripts/datasets/browse_small_norb.py",
"scripts/datasets/make_mnistplus.py",
"scripts/mlp/predict_csv.py",
"scripts/find_gpu_fields.py",
"scripts/tutorials/deep_trainer/test_deep_trainer.py",
"scripts/icml_2013_wrepl/multimodal/make_wordlist.py",
"base.py",
"devtools/tests/test_via_pyflakes.py",
"devtools/tests/test_shebangs.py",
"devtools/tests/pep8/pep8.py",
"devtools/tests/docscrape.py",
"devtools/run_pyflakes.py",
"devtools/record.py",
"train_extensions/tests/test_window_flip.py",
"train_extensions/__init__.py",
]
whitelist_docstrings = [
'scripts/datasets/step_through_norb_foveated.py',
'blocks.py',
'datasets/hdf5.py',
'rbm_tools.py',
'training_algorithms/tests/test_bgd.py',
'training_algorithms/tests/test_sgd.py',
'training_algorithms/tests/test_default.py',
'training_algorithms/bgd.py',
'training_algorithms/default.py',
'training_algorithms/training_algorithm.py',
'training_algorithms/__init__.py',
'training_algorithms/sgd.py',
'distributions/tests/test_mnd.py',
'distributions/multinomial.py',
'distributions/parzen.py',
'distributions/__init__.py',
'distributions/mnd.py',
'distributions/uniform_hypersphere.py',
'models/setup.py',
'models/independent_multiclass_logistic.py',
'models/softmax_regression.py',
'models/sparse_autoencoder.py',
'models/tests/test_reflection_clip.py',
'models/tests/test_dbm.py',
'models/tests/test_gsn.py',
'models/tests/test_dropout.py',
'models/tests/test_autoencoder.py',
'models/tests/test_mlp.py',
'models/tests/test_s3c_inference.py',
'models/tests/test_maxout.py',
'models/tests/test_mnd.py',
'models/tests/test_vae.py',
'models/tests/test_rbm.py',
'models/tests/test_s3c_misc.py',
'models/gsn.py',
'models/dbm/sampling_procedure.py',
'models/dbm/layer.py',
'models/dbm/__init__.py',
'models/dbm/dbm.py',
'models/dbm/ising.py',
'models/differentiable_sparse_coding.py',
'models/local_coordinate_coding.py',
'models/maxout.py',
'models/s3c.py',
'models/mnd.py',
'models/rbm.py',
'models/autoencoder.py',
'tests/test_dbm_metrics.py',
'tests/test_monitor.py',
'tests/test_train.py',
'tests/rbm/test_ais.py',
'kmeans.py',
'packaged_dependencies/__init__.py',
'packaged_dependencies/theano_linear/imaging.py',
'packaged_dependencies/theano_linear/unshared_conv/__init__.py',
'packaged_dependencies/theano_linear/unshared_conv/unshared_conv.py',
'packaged_dependencies/theano_linear/linearmixin.py',
'packaged_dependencies/theano_linear/linear.py',
'packaged_dependencies/theano_linear/test_spconv.py',
'expr/activations.py',
'expr/tests/test_probabilistic_max_pooling.py',
'expr/tests/test_preprocessing.py',
'expr/tests/test_nnet.py',
'expr/tests/test_coding.py',
'expr/tests/test_normalize.py',
'expr/tests/test_stochastic_pool.py',
'expr/preprocessing.py',
'expr/image.py',
'expr/coding.py',
'expr/__init__.py',
'expr/stochastic_pool.py',
'expr/sampling.py',
'expr/normalize.py',
'expr/probabilistic_max_pooling.py',
'expr/information_theory.py',
'expr/basic.py',
'testing/tests/test.py',
'testing/skip.py',
'testing/prereqs.py',
'testing/__init__.py',
'testing/datasets.py',
'gui/get_weights_report.py',
'gui/__init__.py',
'gui/patch_viewer.py',
'scalar.py',
'sandbox/cuda_convnet/weight_acts.py',
'sandbox/cuda_convnet/filter_acts.py',
'sandbox/cuda_convnet/tests/test_filter_acts_strided.py',
'sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py',
'sandbox/cuda_convnet/tests/test_filter_acts.py',
'sandbox/cuda_convnet/tests/test_img_acts.py',
'sandbox/cuda_convnet/tests/test_response_norm.py',
'sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py',
'sandbox/cuda_convnet/tests/test_weight_acts.py',
'sandbox/cuda_convnet/tests/test_rop_pool.py',
'sandbox/cuda_convnet/tests/test_pool.py',
'sandbox/cuda_convnet/tests/test_common.py',
'sandbox/cuda_convnet/tests/test_stochastic_pool.py',
'sandbox/cuda_convnet/shared_code.py',
'sandbox/cuda_convnet/__init__.py',
'sandbox/cuda_convnet/img_acts.py',
'sandbox/cuda_convnet/base_acts.py',
'sandbox/cuda_convnet/pool.py',
'sandbox/cuda_convnet/stochastic_pool.py',
'sandbox/cuda_convnet/code_templates.py',
'sandbox/cuda_convnet/probabilistic_max_pooling.py',
'sandbox/tuple_var.py',
'sandbox/__init__.py',
'sandbox/lisa_rl/bandit/simulator.py',
'sandbox/lisa_rl/bandit/agent.py',
'sandbox/lisa_rl/bandit/algorithm.py',
'sandbox/lisa_rl/bandit/environment.py',
'sandbox/lisa_rl/bandit/average_agent.py',
'sandbox/lisa_rl/bandit/classifier_bandit.py',
'sandbox/lisa_rl/bandit/__init__.py',
'sandbox/lisa_rl/bandit/classifier_agent.py',
'sandbox/lisa_rl/bandit/gaussian_bandit.py',
'sandbox/lisa_rl/__init__.py',
'config/old_config.py',
'config/tests/test_yaml_parse.py',
'config/yaml_parse.py',
'space/tests/test_space.py',
'space/__init__.py',
'datasets/norb.py',
'datasets/utlc.py',
'datasets/mnistplus.py',
'datasets/cos_dataset.py',
'datasets/cifar10.py',
'datasets/svhn.py',
'datasets/tests/test_preprocessing.py',
'datasets/tests/test_mnist.py',
'datasets/tests/test_imports.py',
'datasets/tests/test_cifar10.py',
'datasets/tests/test_norb.py',
'datasets/tests/test_dense_design_matrix.py',
'datasets/tests/test_vector_spaces_dataset.py',
'datasets/tests/test_four_regions.py',
'datasets/tests/test_csv_dataset.py',
'datasets/tests/test_icml07.py',
'datasets/tests/test_utlc.py',
'datasets/preprocessing.py',
'datasets/avicenna.py',
'datasets/iris.py',
'datasets/config.py',
'datasets/dense_design_matrix.py',
'datasets/adult.py',
'datasets/tfd.py',
'datasets/icml07.py',
'datasets/filetensor.py',
'datasets/npy_npz.py',
'datasets/hepatitis.py',
'datasets/wiskott.py',
'datasets/control.py',
'datasets/exc.py',
'datasets/__init__.py',
'datasets/mnist.py',
'datasets/sparse_dataset.py',
'datasets/csv_dataset.py',
'datasets/cifar100.py',
'datasets/tl_challenge.py',
'datasets/transformer_dataset.py',
'datasets/norb_small.py',
'datasets/retina.py',
'datasets/ocr.py',
'datasets/stl10.py',
'datasets/matlab_dataset.py',
'datasets/vector_spaces_dataset.py',
'datasets/four_regions.py',
'datasets/debug.py',
'datasets/binarizer.py',
'termination_criteria/__init__.py',
'__init__.py',
'utils/utlc.py',
'utils/setup.py',
'utils/compile.py',
'utils/logger.py',
'utils/general.py',
'utils/testing.py',
'utils/tests/test_mnist_ubyte.py',
'utils/tests/test_data_specs.py',
'utils/tests/test_video.py',
'utils/tests/test_bit_strings.py',
'utils/tests/test_rng.py',
'utils/tests/test_pooling.py',
'utils/tests/test_iteration.py',
'utils/tests/test_insert_along_axis.py',
'utils/tests/test_utlc.py',
'utils/tests/test_compile.py',
'utils/tests/test_key_aware.py',
'utils/key_aware.py',
'utils/video.py',
'utils/bit_strings.py',
'utils/iteration.py',
'utils/pooling.py',
'utils/theano_graph.py',
'utils/common_strings.py',
'utils/datasets.py',
'utils/data_specs.py',
'utils/shell.py',
'utils/rng.py',
'utils/insert_along_axis.py',
'utils/environ.py',
'utils/call_check.py',
'utils/mnist_ubyte.py',
'utils/track_version.py',
'utils/mem.py',
'utils/python26.py',
'utils/timing.py',
'deprecated/__init__.py',
'deprecated/classifier.py',
'train.py',
'format/tests/test_target_format.py',
'format/__init__.py',
'dataset_get/dataset-get.py',
'dataset_get/helper-scripts/make-sources.py',
'dataset_get/helper-scripts/make-archive.py',
'dataset_get/dataset_resolver.py',
'pca.py',
'monitor.py',
'optimization/batch_gradient_descent.py',
'optimization/__init__.py',
'optimization/test_batch_gradient_descent.py',
'optimization/linear_cg.py',
'optimization/minres.py',
'optimization/test_feature_sign.py',
'optimization/feature_sign.py',
'optimization/linesearch.py',
'linear/conv2d.py',
'linear/tests/test_matrixmul.py',
'linear/local_c01b.py',
'linear/matrixmul.py',
'linear/__init__.py',
'linear/linear_transform.py',
'linear/conv2d_c01b.py',
'energy_functions/tests/__init__.py',
'energy_functions/rbm_energy.py',
'energy_functions/__init__.py',
'energy_functions/energy_function.py',
'scripts/plot_monitor.py',
'scripts/print_model.py',
'scripts/tests/__init__.py',
'scripts/pkl_inspector.py',
'scripts/get_version.py',
'scripts/print_monitor.py',
'scripts/show_binocular_greyscale_examples.py',
'scripts/num_parameters.py',
'scripts/jobman/tester.py',
'scripts/jobman/experiment.py',
'scripts/jobman/__init__.py',
'scripts/dbm/__init__.py',
'scripts/dbm/dbm_metrics.py',
'scripts/papers/__init__.py',
'scripts/papers/jia_huang_wkshp_11/extract_features.py',
'scripts/print_channel_doc.py',
'scripts/gpu_pkl_to_cpu_pkl.py',
'scripts/datasets/step_through_small_norb.py',
'scripts/datasets/download_mnist.py',
'scripts/datasets/download_binarized_mnist.py',
'scripts/datasets/browse_small_norb.py',
'scripts/datasets/make_mnistplus.py',
'scripts/__init__.py',
'scripts/gsn_example.py',
'scripts/mlp/predict_csv.py',
'scripts/mlp/__init__.py',
'scripts/find_gpu_fields.py',
'scripts/tutorials/dbm_demo/train_dbm.py',
'scripts/tutorials/dbm_demo/__init__.py',
'scripts/tutorials/tests/test_dbm.py',
'scripts/tutorials/tests/test_mlp_nested.py',
'scripts/tutorials/multilayer_perceptron/tests/test_mlp.py',
'scripts/tutorials/softmax_regression/tests/test_softmaxreg.py',
'scripts/tutorials/deep_trainer/__init__.py',
'scripts/tutorials/deep_trainer/run_deep_trainer.py',
'scripts/tutorials/grbm_smd/make_dataset.py',
'scripts/tutorials/grbm_smd/__init__.py',
'scripts/tutorials/grbm_smd/test_grbm_smd.py',
'scripts/tutorials/__init__.py',
'scripts/tutorials/jobman_demo/utils.py',
'scripts/tutorials/jobman_demo/__init__.py',
'scripts/tutorials/stacked_autoencoders/tests/test_dae.py',
'scripts/icml_2013_wrepl/__init__.py',
'scripts/icml_2013_wrepl/multimodal/extract_layer_2_kmeans_features.py',
'scripts/icml_2013_wrepl/multimodal/make_submission.py',
'scripts/icml_2013_wrepl/multimodal/lcn.py',
'scripts/icml_2013_wrepl/multimodal/__init__.py',
'scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py',
'scripts/icml_2013_wrepl/emotions/emotions_dataset.py',
'scripts/icml_2013_wrepl/emotions/make_submission.py',
'scripts/icml_2013_wrepl/emotions/__init__.py',
'scripts/icml_2013_wrepl/black_box/black_box_dataset.py',
'scripts/icml_2013_wrepl/black_box/make_submission.py',
'scripts/icml_2013_wrepl/black_box/__init__.py',
'scripts/diff_monitor.py',
'base.py',
'devtools/tests/test_via_pyflakes.py',
'devtools/tests/test_shebangs.py',
'devtools/tests/__init__.py',
'devtools/tests/docscrape.py',
'devtools/run_pyflakes.py',
'devtools/__init__.py',
'devtools/record.py',
'corruption.py',
'datasets/tests/test_tl_challenge.py',
'datasets/tests/test_tfd.py',
'datasets/tests/test_npy_npz.py',
'linear/tests/test_conv2d.py',
'devtools/tests/pep8/pep8.py',
'devtools/tests/pep8/__init__.py',
'scripts/lcc_tangents/make_dataset.py',
'scripts/icml_2013_wrepl/multimodal/make_wordlist.py',
'scripts/datasets/make_stl10_whitened.py',
'scripts/datasets/make_stl10_patches_8x8.py',
'scripts/datasets/make_stl10_patches.py',
'scripts/datasets/make_cifar10_whitened.py',
'scripts/datasets/make_cifar10_gcn_whitened.py',
'scripts/datasets/make_cifar100_patches.py',
'scripts/datasets/make_cifar100_gcn_whitened.py',
'scripts/datasets/make_svhn_pytables.py',
'energy_functions/tests/test_rbm_energy.py',
]
# add files which fail to run to whitelist_docstrings
whitelist_docstrings.extend([
'sandbox/rnn/models/mlp_hook.py',
'training_algorithms/tests/test_learning_rule.py',
'models/pca.py',
'datasets/tests/test_hdf5.py',
'linear/tests/test_conv2d_c01b.py',
'packaged_dependencies/theano_linear/conv2d.py',
'packaged_dependencies/theano_linear/pyramid.py',
'packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/'
'test_gpu_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/test_localdot.py',
'packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/localdot.py',
'packaged_dependencies/theano_linear/util.py',
'packaged_dependencies/theano_linear/__init__.py',
'packaged_dependencies/theano_linear/test_matrixmul.py',
'packaged_dependencies/theano_linear/test_linear.py',
'packaged_dependencies/theano_linear/spconv.py',
'sandbox/cuda_convnet/tests/test_weight_acts_strided.py',
'sandbox/cuda_convnet/tests/test_image_acts_strided.py',
'sandbox/cuda_convnet/specialized_bench.py',
'sandbox/cuda_convnet/response_norm.py',
'sandbox/cuda_convnet/convnet_compile.py',
'sandbox/cuda_convnet/pthreads.py',
'sandbox/cuda_convnet/bench.py',
'sandbox/lisa_rl/bandit/plot_reward.py',
'sandbox/lisa_rl/bandit/simulate.py',
'config/__init__.py',
'utils/__init__.py',
'optimization/test_linesearch.py',
'optimization/test_minres.py',
'optimization/test_linear_cg.py',
'scripts/papers/maxout/svhn_preprocessing.py',
'scripts/papers/maxout/compute_test_err.py',
'scripts/papers/jia_huang_wkshp_11/fit_final_model.py',
'scripts/papers/jia_huang_wkshp_11/evaluate.py',
'scripts/papers/jia_huang_wkshp_11/npy2mat.py',
'scripts/papers/jia_huang_wkshp_11/assemble.py',
'scripts/datasets/make_cifar100_patches_8x8.py',
'scripts/datasets/make_downsampled_stl10.py',
'scripts/datasets/make_cifar100_whitened.py',
'scripts/tutorials/deep_trainer/test_deep_trainer.py',
'scripts/icml_2013_wrepl/black_box/learn_zca.py',
'train_extensions/tests/test_window_flip.py',
'train_extensions/window_flip.py',
'linear/tests/test_local_c01b.py',
'sandbox/cuda_convnet/debug.py', ])
def test_format_pep8():
"""
Test if pep8 is respected.
"""
pep8_checker = StyleGuide()
files_to_check = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if rel_path in whitelist_pep8:
continue
else:
files_to_check.append(path)
report = pep8_checker.check_files(files_to_check)
if report.total_errors > 0:
raise AssertionError("PEP8 Format not respected")
def print_files_information_pep8():
"""
Print the list of files which can be removed from the whitelist and the
list of files which do not respect PEP8 formatting that aren't in the
whitelist
"""
infracting_files = []
non_infracting_files = []
pep8_checker = StyleGuide(quiet=True)
for path in list_files(".py"):
number_of_infractions = pep8_checker.input_file(path)
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if number_of_infractions > 0:
if rel_path not in whitelist_pep8:
infracting_files.append(path)
else:
if rel_path in whitelist_pep8:
non_infracting_files.append(path)
print("Files that must be corrected or added to whitelist:")
for file in infracting_files:
print(file)
print("Files that can be removed from whitelist:")
for file in non_infracting_files:
print(file)
def test_format_docstrings():
"""
Test if docstrings are well formatted.
"""
try:
verify_format_docstrings()
except SkipTest as e:
import traceback
traceback.print_exc(e)
raise AssertionError(
"Some file raised SkipTest on import, and inadvertently"
" canceled the documentation testing."
)
def verify_format_docstrings():
"""
Implementation of `test_format_docstrings`. The implementation is
factored out so it can be placed inside a guard against SkipTest.
"""
format_infractions = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if rel_path in whitelist_docstrings:
continue
try:
format_infractions.extend(docstring_errors(path))
except Exception as e:
format_infractions.append(["%s failed to run so format cannot "
"be checked. Error message:\n %s" %
(rel_path, e)])
if len(format_infractions) > 0:
msg = "\n".join(':'.join(line) for line in format_infractions)
raise AssertionError("Docstring format not respected:\n%s" % msg)
if __name__ == "__main__":
print_files_information_pep8()
| bsd-3-clause |
jfsantos/ift6266h14 | old/test_timit_iy.py | 1 | 2996 | from timit_full import TimitFullCorpusReader
import itertools
import numpy as np
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.models.mlp import *
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.termination_criteria import EpochCounter
from pylearn2.training_algorithms.sgd import SGD
from pylearn2.training_algorithms import learning_rule
from pylearn2.train import Train
from pylearn2.train_extensions import best_params
import cPickle as pickle
import theano
# Gets all utterances from <spkrid>, splits them into <framelen>
# frames with <overlap> overlaps. Returns the frames and corresponding
# phone symbols.
spkrid = 'MTCS0'
class TimitPhoneData(DenseDesignMatrix):
def __init__(self, spkrid, phone, framelen, overlap, start, stop):
data = TimitFullCorpusReader('/home/jfsantos/data/TIMIT/')
# Some list comprehension/zip magic here (but it works!)
spkrfr = [data.frames(z, 160, 159) for z in
data.utteranceids(spkrid=spkrid)]
fr, ph = zip(*[(x[0], x[1]) for x in spkrfr])
fr = np.vstack(fr)*2**-15
ph = list(itertools.chain(*ph))
# Get all elements for which the phone is 'iy'
iy_idx = [i for i,x in enumerate(ph) if x == 'iy']
fr_iy = fr[iy_idx]
X = fr_iy[:,0:159]
y = np.array([fr_iy[:,159]]).T # y.ndim has to be 2
super(TimitPhoneData,self).__init__(X=X[start:stop], y=y[start:stop])
train = TimitPhoneData(spkrid='FPLS0', phone='iy', framelen=160, overlap=159, start=0, stop=10000)
valid = TimitPhoneData(spkrid='FPLS0', phone='iy', framelen=160, overlap=159, start=10000, stop=12000)
test = TimitPhoneData(spkrid='FPLS0', phone='iy', framelen=160, overlap=159, start=12000, stop=18000)
i0 = VectorSpace(159)
s0 = Sigmoid(layer_name='h0', dim=500, sparse_init=15)
l0 = Linear(layer_name='y', dim=1, sparse_init=15)
mdl = MLP(layers=[s0, l0], nvis=159, input_space=i0)
trainer = SGD(batch_size=512, learning_rate = .01, init_momentum = .5,
monitoring_dataset = {'train' : train, 'valid': valid,
'test' : test}, termination_criterion =
EpochCounter(max_epochs=200))
watcher = best_params.MonitorBasedSaveBest(
channel_name='test_objective',
save_path='nextsample_iy_FPLS0_mlp_sig_lin_watcher.pkl')
experiment = Train(dataset=train,
model=mdl,
algorithm=trainer, extensions = [watcher])
experiment.main_loop()
# Now we have the best model, let's load it and use it to generate some
# samples!
bestmdl = pickle.load(open('nextsample_iy_FPLS0_mlp_sig_lin_watcher.pkl'))
X = theano.tensor.dmatrix('X')
y = bestmdl.fprop(X)
predict = theano.function([X], y)
# Let's start with an all-zero vector, then use the prediction to populate the next sample
x0 = np.asmatrix(np.zeros((1,16000)))
for k in np.arange(160,16000):
frame = x0[:,k-160:k-1]
x0[0,k] = predict(frame)
| mit |
neuroneuro15/natnetclient | build/lib/natnetclient/utils.py | 1 | 1318 | __author__ = 'ratcave'
import numpy as np
from sklearn.decomposition import PCA
def rotate_to_var(markers):
"""Returns degrees to rotate about y axis so greatest marker variance points in +X direction"""
# Mean-Center
markers -= np.mean(markers, axis=0)
# Vector in direction of greatest variance
pca = PCA(n_components=1).fit(markers[:, [0, 2]])
coeff_vec = pca.components_[0]
# # Flip coeff_vec in direction of max variance along the vector.
# marker_var = markers[markers[:,2].argsort(), 2] # Check variance along component to determine whether to flip.
# winlen = int(len(marker_var)/2+1) # Window length for moving mean (two steps, with slight overlap)
# var_means = np.array([marker_var[:winlen], marker_var[-winlen:]]).mean(axis=1)
# coeff_vec = coeff_vec * -1 if np.diff(var_means)[0] < 0 else coeff_vec
# Rotation amount, in radians
base_vec = np.array([1, 0]) # Vector in +X direction
msin, mcos = np.cross(coeff_vec, base_vec), np.dot(coeff_vec, base_vec)
angle = np.degrees(np.arctan2(msin, mcos))
print("Angle within function: {}".format(angle))
return angle
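# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal example under the assumption of a synthetic (N, 3) marker array;
# rotate_to_var only uses the X/Z columns and mean-centers the array in place.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _demo_markers = _rng.randn(100, 3) * np.array([5.0, 1.0, 0.5])  # most variance along X
    _angle = rotate_to_var(_demo_markers)
    print("Demo rotation angle (degrees): {}".format(_angle))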
# def get_pca_rotation(markers):
#
# markers_2d = markers[:, [0, 2]]
# pca = PCA(n_components=1).fit(markers[:, [0, 2]])
#
# coeff = pca.components_[0]
| gpl-2.0 |
joshbohde/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 1 | 16225 | import numpy as np
from numpy.testing import assert_array_equal, assert_approx_equal
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import linear_model, datasets, metrics
from sklearn import preprocessing
import unittest
from nose.tools import raises
from nose.tools import assert_raises
##
## Test Data
##
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = [1, 1, 1, 2, 2, 2, 3, 3, 3]
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = [1, 2, 3]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
##
## Classification Test Case
##
class DenseSGDClassifierTestCase(unittest.TestCase):
"""Test suite for the dense representation variant of SGD"""
factory = linear_model.SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
def test_sgd_penalties(self):
"""Check whether penalties and hyperparameters are set properly"""
clf = self.factory(penalty='l2')
assert clf.rho == 1.0
clf = self.factory(penalty='l1')
assert clf.rho == 0.0
clf = self.factory(penalty='elasticnet', rho=0.85)
assert clf.rho == 0.85
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', rho=0.85)
def test_sgd_losses(self):
"""Check whether losses and hyperparameters are set properly"""
clf = self.factory(loss='hinge')
assert isinstance(clf.loss_function, linear_model.Hinge)
clf = self.factory(loss='log')
assert isinstance(clf.loss_function, linear_model.Log)
clf = self.factory(loss='modified_huber')
assert isinstance(clf.loss_function, linear_model.ModifiedHuber)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_arument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([0, 0]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([0, 0]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba for log loss only"""
# hinge loss does not allow for conditional prob estimate
clf = self.factory(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_raises(NotImplementedError, clf.predict_proba, [3, 2])
# log loss implements the logistic regression prob estimate
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X, Y)
p = clf.predict_proba([3, 2])
assert p > 0.5
p = clf.predict_proba([-1, -1])
assert p < 0.5
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
np.random.seed(13)
idx = np.arange(n)
np.random.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx, :]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weight(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, class_weight={1: 0.001})
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000)
clf_weighted.fit(X, y, class_weight={0: 0.5, 1: 0.5})
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000)
clf.fit(X, Y, class_weight={0: 0.5})
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000)
clf.fit(X, Y, class_weight=[0.5])
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = preprocessing.scale(X)
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000).fit(X, y)
assert_approx_equal(metrics.f1_score(y, clf.predict(X)), 0.96, 2)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001,
n_iter=1000).fit(X, y, class_weight="auto")
assert_approx_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96, 2)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred) < 0.96
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000)
clf.fit(X_imbalanced, y_imbalanced, class_weight="auto")
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred) > 0.96
def test_sample_weights(self):
"""
Test weights on individual samples
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=range(7))
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = linear_model.sparse.SGDClassifier
################################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase):
"""Test suite for the dense representation variant of SGD"""
factory = linear_model.SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert clf.coef_[0] == clf.coef_[1]
def test_sgd_penalties(self):
"""Check whether penalties and hyperparameters are set properly"""
clf = self.factory(penalty='l2')
assert clf.rho == 1.0
clf = self.factory(penalty='l1')
assert clf.rho == 0.0
clf = self.factory(penalty='elasticnet', rho=0.85)
assert clf.rho == 0.85
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', rho=0.85)
def test_sgd_losses(self):
"""Check whether losses and hyperparameters are set properly"""
clf = self.factory(loss='squared_loss')
assert isinstance(clf.loss_function, linear_model.SquaredLoss)
clf = self.factory(loss='huber', p=0.5)
assert isinstance(clf.loss_function, linear_model.Huber)
assert clf.p == 0.5
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", p=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", p=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
def test_elasticnet_convergence(self):
"""Check that the SGD ouput is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
np.random.seed(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = np.random.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for rho in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, rho=rho,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, rho=rho, fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and rho=%f" % (alpha, rho))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = linear_model.sparse.SGDRegressor
| bsd-3-clause |
ephes/scikit-learn | sklearn/semi_supervised/label_propagation.py | 127 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
google-research/scenic | scenic/projects/robust_segvit/datasets/cityscapes_variants.py | 1 | 11019 | """Data generators for the Cityscapes dataset variants.
Supported datasets, set by dataset_configs.dataset_name in the config file:
cityscapes_corrupted: https://arxiv.org/pdf/1907.07484.pdf
fishyscapes: https://link.springer.com/article/10.1007/s11263-021-01511-6
Implementation details:
cityscapes_c: https://github.com/ekellbuch/cityscapes-c
"""
import functools
from typing import Optional
from absl import logging
from flax import jax_utils
import jax.numpy as jnp
from scenic.dataset_lib import cityscapes_dataset
from scenic.dataset_lib import dataset_utils
from scenic.dataset_lib import datasets
import tensorflow as tf
import tensorflow_datasets as tfds
CITYSCAPES_C_CORRUPTIONS = [
'gaussian_noise',
]
FISHYSCAPES_CORRUPTIONS = [
'Static',
]
CITYSCAPES_C_SEVERITIES = range(1, 6)
DATASET_INFO = {
'cityscapes': {
'tfds_name': 'cityscapes',
'split': 'validation',
'num_of_examples': 500,
},
'cityscapes_corrupted': {
'tfds_name': 'internal',
'split': 'validation',
'num_of_examples': 500,
},
    'fishyscapes': {
'tfds_name': 'internal',
'split': 'validation',
'num_of_examples': 30,
},
}
# Adds cityscapes_c
for severity in CITYSCAPES_C_SEVERITIES:
for corruption in CITYSCAPES_C_CORRUPTIONS:
temp_dataset_name = f'cityscapes_corrupted/semantic_segmentation_{corruption}_{severity}'
DATASET_INFO[temp_dataset_name] = {
'tfds_name': temp_dataset_name,
'split': 'validation',
'num_of_examples': 500,
}
# Adds fishyscapes
for corruption in FISHYSCAPES_CORRUPTIONS:
temp_dataset_name = f'fishyscapes/{corruption}'
DATASET_INFO[temp_dataset_name] = {
'tfds_name': temp_dataset_name,
'split': 'validation',
'num_of_examples': 30,
}
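# Note (added comment): the two loops above register validation-only entries such as
# 'cityscapes_corrupted/semantic_segmentation_gaussian_noise_1' through
# '..._gaussian_noise_5' (500 examples each) and 'fishyscapes/Static' (30 examples).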
cityscapes_meta_data = {
'num_classes':
len([c.id for c in cityscapes_dataset.CLASSES if not c.ignore_in_eval]),
'class_names':
cityscapes_dataset.get_class_names(),
'class_colors':
cityscapes_dataset.get_class_colors(),
'class_proportions':
cityscapes_dataset.get_class_proportions(),
}
fishyscapes_meta_data = {
'num_classes': 2,
'class_names': ['ind', 'ood'],
'class_colors': [(0, 0, 1), (1, 0, 0)],
}
def normalize(image, dtype=tf.float32):
"""Normalizes the value of pixels in the given image.
Args:
image: `Tensor` representing an image binary of arbitrary size.
dtype: Tensorflow data type, Data type of the image.
Returns:
A normalized image `Tensor`.
"""
image = tf.cast(image, dtype=dtype)
if dtype not in [tf.int32, tf.int64, tf.uint32, tf.uint64]:
image /= tf.constant(255.0, shape=[1, 1, 1], dtype=dtype)
return image
def preprocess_example_fishyscapes(example,
train,
dtype=tf.float32,
resize=None,
include_mask=True):
"""Preprocesses the given image.
Args:
example: dict; Example coming from TFDS.
train: bool; Whether to apply training-specific preprocessing or not.
dtype: Tensorflow data type; Data type of the image.
resize: sequence; [H, W] to which image and labels should be resized.
include_mask: include batch_mask to ignore specific classes.
Returns:
An example dict as required by the model.
"""
image = normalize(example['image_left'], dtype)
mask = example['mask']
# Resize test images (train images are cropped/resized during augmentation):
if not train:
if resize is not None:
image = tf.image.resize(image, resize, 'bilinear')
mask = tf.image.resize(mask, resize, 'nearest')
image = tf.cast(image, dtype)
mask = tf.cast(mask, dtype)
mask = tf.squeeze(mask, axis=2)
outputs = {'inputs': image, 'label': mask}
if include_mask:
# Fishyscapes mask has values 0,1, 255, background pixels are set as 255.
# create batch_mask array and set background pixels to 0 and
# pixels that should be included during eval to 1
batch_mask = tf.ones_like(mask, dtype)
batch_mask = tf.cast(batch_mask*(1-tf.cast(mask == 255, dtype)), dtype)
    # update the mask array to be 0 or 1 by setting cls 255 to cls 0.
mask = tf.cast(mask*(1-tf.cast(mask == 255, dtype)), dtype)
outputs = {'inputs': image, 'label': mask, 'batch_mask': batch_mask}
return outputs
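# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A synthetic Fishyscapes-style record, assuming the TFDS features used above:
# 'image_left' as an (H, W, 3) uint8 image and 'mask' as an (H, W, 1) label map
# in which background pixels carry the value 255.
# _fake_example = {
#     'image_left': tf.zeros([8, 16, 3], dtype=tf.uint8),
#     'mask': 255 * tf.ones([8, 16, 1], dtype=tf.uint8),
# }
# _out = preprocess_example_fishyscapes(_fake_example, train=False, resize=(4, 8))
# # _out['inputs'] is a (4, 8, 3) float image in [0, 1]; _out['label'] and
# # _out['batch_mask'] are (4, 8), with batch_mask == 0 wherever mask == 255.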
preprocess_examples = {
'cityscapes': cityscapes_dataset.preprocess_example,
'fishyscapes': preprocess_example_fishyscapes,
}
def cityscapes_load_split(
dataset_name,
batch_size,
train=False,
dtype=tf.float32,
shuffle_buffer_size=10,
shuffle_seed=None,
data_augmentations=None,
preprocess_ex_eval=None,
cache=True,
data_dir: Optional[str] = None,
):
"""Creates a split from the Cityscapes dataset using TensorFlow Datasets.
For the training set, we drop the last partial batch. This is fine to do
because we additionally shuffle the data randomly each epoch, thus the trainer
will see all data in expectation. For the validation set, we pad the final
batch to the desired batch size.
Args:
dataset_name: string; Dataset name defined in DATASET_INFO.
batch_size: int; The batch size returned by the data pipeline.
train: bool; Whether to load the train or evaluation split.
dtype: TF data type; Data type of the image.
shuffle_buffer_size: int; Buffer size for the TFDS prefetch.
shuffle_seed: The seed to use when shuffling the train split.
    data_augmentations: list(str); Types of data augmentation applied on the
      training data (ignored here; only evaluation is supported).
preprocess_ex_eval: preprocessing function. Default None.
cache: bool; Whether to cache dataset in memory.
data_dir: directory with data.
Returns:
A `tf.data.Dataset`.
"""
assert not train, 'Only evaluation is supported.'
assert dataset_name in DATASET_INFO
del data_augmentations
cityscapes_variant_info = DATASET_INFO.get(dataset_name, {})
split = cityscapes_variant_info['split'] # only supports validation
# Load the preprocessing function
if 'cityscapes' in cityscapes_variant_info.get('tfds_name'):
if dataset_name == 'cityscapes':
builder = tfds.builder(dataset_name, dtype=dtype)
elif 'cityscapes_corrupted' in dataset_name:
if data_dir is None:
# pylint: disable=line-too-long
data_dir = 'gs://ub-ekb/tensorflow_datasets/cityscapes_corrupted/tfrecords/v.0.0' # pylint: disable=line-too-long
# pylint: enable=line-too-long
builder = tfds.builder(dataset_name, data_dir=data_dir)
elif 'fishyscapes' in cityscapes_variant_info.get('tfds_name'):
if data_dir is None:
data_dir = 'gs://ub-ekb/tensorflow_datasets/fishyscapes/tfrecords/v.0.0'
builder = tfds.builder(dataset_name, data_dir=data_dir)
else:
raise NotImplementedError(f'{dataset_name} not available')
ds, ds_info = dataset_utils.load_split_from_tfds_builder(
builder=builder,
batch_size=batch_size,
split=split,
preprocess_example=preprocess_ex_eval,
shuffle_buffer_size=shuffle_buffer_size,
shuffle_seed=shuffle_seed,
cache=cache)
return ds, ds_info
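# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Loading the plain Cityscapes validation split, assuming TFDS data is available;
# preprocess_ex_eval is built the same way get_dataset does further below.
# _preprocess_ex_eval = functools.partial(
#     preprocess_examples['cityscapes'], train=False, dtype=tf.float32)
# _eval_ds, _eval_ds_info = cityscapes_load_split(
#     dataset_name='cityscapes', batch_size=8, train=False,
#     preprocess_ex_eval=_preprocess_ex_eval)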
def _check_dataset_exists(dataset_configs):
assert 'dataset_name' in dataset_configs, ('Must specify dataset_name in '
'dataset_configs.')
dataset_name = dataset_configs['dataset_name']
assert dataset_configs[
'dataset_name'] in DATASET_INFO, f'{dataset_name} is not supported.'
return dataset_name
@datasets.add_dataset('cityscapes_variants')
def get_dataset(*,
batch_size,
eval_batch_size,
num_shards,
dtype_str='float32',
shuffle_seed=0,
prefetch_buffer_size=2,
rng=None,
dataset_configs=None,
dataset_service_address: Optional[str] = None):
"""Returns generators for the Cityscapes validation, and test set.
Args:
batch_size: int; Determines the train batch size.
eval_batch_size: int; Determines the evaluation batch size.
num_shards: int; Number of shards --> batch shape: [num_shards, bs, ...].
dtype_str: Data type of the image (e.g. 'float32').
shuffle_seed: int; Seed for shuffling the training data.
prefetch_buffer_size: int; Buffer size for the TFDS prefetch.
rng: JAX rng key, which can be used for augmentation, shuffling, etc.
dataset_configs: dict; Dataset specific configurations.
dataset_service_address: If set, will distribute the training dataset using
the given tf.data service at the given address.
Returns:
A dataset_utils.Dataset() which includes a train_iter, a valid_iter,
a test_iter, and a dict of meta_data.
"""
del batch_size
del shuffle_seed, rng
del dataset_service_address
dtype = getattr(tf, dtype_str)
dataset_configs = dataset_configs or {}
dataset_name = _check_dataset_exists(dataset_configs)
cityscapes_variant_info = DATASET_INFO.get(dataset_name)
target_size = dataset_configs.get('target_size', None)
if 'cityscapes' in dataset_name:
preprocess_example = preprocess_examples['cityscapes']
elif 'fishyscapes' in dataset_name:
preprocess_example = preprocess_examples['fishyscapes']
preprocess_ex_eval = functools.partial(
preprocess_example, train=False, dtype=dtype, resize=target_size)
logging.info('Loading validation split of the %s dataset.', dataset_name)
eval_ds, _ = cityscapes_load_split(
dataset_name=dataset_name,
batch_size=eval_batch_size,
train=False,
dtype=dtype,
preprocess_ex_eval=preprocess_ex_eval)
maybe_pad_batches_eval = functools.partial(
dataset_utils.maybe_pad_batch,
train=False,
batch_size=eval_batch_size,
pixel_level=True)
shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)
exclude_classes = functools.partial(
cityscapes_dataset.exclude_bad_classes,
new_labels=cityscapes_dataset.get_post_exclusion_labels())
eval_iter = iter(eval_ds)
eval_iter = map(dataset_utils.tf_to_numpy, eval_iter)
eval_iter = map(maybe_pad_batches_eval, eval_iter)
if 'cityscapes' in dataset_name:
eval_iter = map(exclude_classes, eval_iter)
eval_iter = map(shard_batches, eval_iter)
eval_iter = jax_utils.prefetch_to_device(eval_iter, prefetch_buffer_size)
if target_size is None:
input_shape = (-1, 1024, 2048, 3)
else:
input_shape = (-1,) + tuple(target_size) + (3,)
meta_data = {
'input_shape': input_shape,
'num_train_examples': 0,
'num_eval_examples': cityscapes_variant_info['num_of_examples'],
'input_dtype': getattr(jnp, dtype_str),
'target_is_onehot': False,
}
if 'cityscapes' in dataset_name:
meta_data.update(cityscapes_meta_data)
elif 'fishyscapes' in dataset_name:
meta_data.update(fishyscapes_meta_data)
return dataset_utils.Dataset(None, eval_iter, None, meta_data)
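# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Direct call with a minimal config; in Scenic the same function is normally
# reached through the @datasets.add_dataset('cityscapes_variants') registry.
# _dataset = get_dataset(
#     batch_size=8, eval_batch_size=8, num_shards=1,
#     dataset_configs={'dataset_name': 'cityscapes', 'target_size': (512, 1024)})
# # Only the second field of the returned Dataset tuple (the validation iterator)
# # is populated; it yields batches with 'inputs', 'label' and 'batch_mask'.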
| apache-2.0 |
starimpact/fast-rcnn | tools/train_net.py | 23 | 3134 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
roidb = get_training_roidb(imdb)
output_dir = get_output_dir(imdb, None)
print 'Output will be saved to `{:s}`'.format(output_dir)
train_net(args.solver, roidb, output_dir,
pretrained_model=args.pretrained_model,
max_iters=args.max_iters)
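# --- Illustrative invocation (added; the file paths below are hypothetical) ---
# python tools/train_net.py --gpu 0 \
#     --solver models/VGG16/solver.prototxt \
#     --weights data/imagenet_models/VGG16.v2.caffemodel \
#     --imdb voc_2007_trainval \
#     --iters 40000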
| mit |