repo_name (stringlengths 6-103) | path (stringlengths 5-191) | copies (stringlengths 1-4) | size (stringlengths 4-6) | content (stringlengths 986-970k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
anntzer/scikit-learn | sklearn/ensemble/__init__.py | 17 | 1502 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from ._base import BaseEnsemble
from ._forest import RandomForestClassifier
from ._forest import RandomForestRegressor
from ._forest import RandomTreesEmbedding
from ._forest import ExtraTreesClassifier
from ._forest import ExtraTreesRegressor
from ._bagging import BaggingClassifier
from ._bagging import BaggingRegressor
from ._iforest import IsolationForest
from ._weight_boosting import AdaBoostClassifier
from ._weight_boosting import AdaBoostRegressor
from ._gb import GradientBoostingClassifier
from ._gb import GradientBoostingRegressor
from ._voting import VotingClassifier
from ._voting import VotingRegressor
from ._stacking import StackingClassifier
from ._stacking import StackingRegressor
from ._hist_gradient_boosting.gradient_boosting import (
HistGradientBoostingRegressor,
HistGradientBoostingClassifier,
)
__all__ = [
"BaseEnsemble",
"RandomForestClassifier",
"RandomForestRegressor",
"RandomTreesEmbedding",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"BaggingClassifier",
"BaggingRegressor",
"IsolationForest",
"GradientBoostingClassifier",
"GradientBoostingRegressor",
"AdaBoostClassifier",
"AdaBoostRegressor",
"VotingClassifier",
"VotingRegressor",
"StackingClassifier",
"StackingRegressor",
"HistGradientBoostingClassifier",
"HistGradientBoostingRegressor",
]
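# A minimal usage sketch for the estimators exported above (illustrative only;
# the dataset and hyperparameters are arbitrary, and the __main__ guard keeps
# the example from running on import):
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = RandomForestClassifier(n_estimators=50, random_state=0)
    clf.fit(X_train, y_train)
    print("test accuracy:", clf.score(X_test, y_test))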
| bsd-3-clause |
florian-f/sklearn | sklearn/manifold/tests/test_spectral_embedding.py | 6 | 8149 | import warnings
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding import SpectralEmbedding
from sklearn.manifold.spectral_embedding import _graph_is_connected
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
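# A small illustration of the helper above (added as a sketch; the matrices are
# made up): equality must hold up to a global sign flip of each column, while an
# arbitrary offset must be rejected.
def test_check_with_col_sign_flipping_helper():
    A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    B = A.copy()
    B[:, 1] *= -1  # sign-flip the second column only
    assert_true(_check_with_col_sign_flipping(A, B, tol=1e-12))
    # a constant offset is not a sign flip, so this must be rejected
    assert_true(not _check_with_col_sign_flipping(A, A + 1.0, tol=1e-12))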
def test_spectral_embedding_two_components(seed=36):
"""Test spectral embedding with two components"""
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
# test that we can still import spectral embedding
from sklearn.cluster import spectral_embedding as se_deprecated
with warnings.catch_warnings(record=True) as warning_list:
embedded_depr = se_deprecated(affinity, n_components=1,
random_state=np.random.RandomState(seed))
assert_equal(len(warning_list), 1)
assert_true(_check_with_col_sign_flipping(embedded_coordinate,
embedded_depr, 0.05))
def test_spectral_embedding_precomputed_affinity(seed=36):
"""Test spectral embedding with precomputed kernel"""
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
"""Test spectral embedding with callable affinity"""
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
"""Test spectral embedding with amg solver"""
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
    """Test using a pipeline to do spectral clustering"""
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
"""Test that SpectralClustering fails with an unknown eigensolver"""
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
"""Test that SpectralClustering fails with an unknown affinity type"""
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
"""Test that graph connectivity test works as expected"""
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
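# Optional cross-check of the same graphs (added as a sketch; assumes scipy >= 0.11,
# which provides scipy.sparse.csgraph.connected_components): the first graph
# should split into two components, the second into one.
def test_connectivity_against_scipy_csgraph():
    from scipy.sparse.csgraph import connected_components
    disconnected = np.array([[1, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 1, 1, 1, 0],
                             [0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1]])
    connected = np.array([[1, 1, 0, 0, 0],
                          [1, 1, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 1, 1],
                          [0, 0, 0, 1, 1]])
    n_disconnected, _ = connected_components(csr_matrix(disconnected))
    n_connected, _ = connected_components(csr_matrix(connected))
    assert_equal(n_disconnected, 2)
    assert_equal(n_connected, 1)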
| bsd-3-clause |
pytorch/fairseq | tests/speech/__init__.py | 1 | 7147 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import os
import re
import unittest
from pathlib import Path
from tqdm import tqdm
from typing import List, Dict, Optional
import torch
from fairseq.checkpoint_utils import load_model_ensemble_and_task
from fairseq.scoring.wer import WerScorer
from fairseq.scoring.bleu import SacrebleuScorer
from fairseq import utils
import zipfile
S3_BASE_URL = "https://dl.fbaipublicfiles.com/fairseq"
class TestFairseqSpeech(unittest.TestCase):
@classmethod
def download(cls, base_url: str, out_root: Path, filename: str):
url = f"{base_url}/{filename}"
path = out_root / filename
if not path.exists():
torch.hub.download_url_to_file(url, path.as_posix(), progress=True)
return path
def _set_up(self, dataset_id: str, s3_dir: str, data_filenames: List[str]):
self.use_cuda = torch.cuda.is_available()
self.root = Path.home() / ".cache" / "fairseq" / dataset_id
self.root.mkdir(exist_ok=True, parents=True)
os.chdir(self.root)
self.base_url = (
s3_dir if re.search("^https:", s3_dir) else f"{S3_BASE_URL}/{s3_dir}"
)
for filename in data_filenames:
self.download(self.base_url, self.root, filename)
def set_up_librispeech(self):
self._set_up(
"librispeech",
"s2t/librispeech",
[
"cfg_librispeech.yaml",
"spm_librispeech_unigram10000.model",
"spm_librispeech_unigram10000.txt",
"librispeech_test-other.tsv",
"librispeech_test-other.zip",
],
)
def set_up_ljspeech(self):
self._set_up(
"ljspeech",
"s2/ljspeech",
[
"cfg_ljspeech_g2p.yaml",
"ljspeech_g2p_gcmvn_stats.npz",
"ljspeech_g2p.txt",
"ljspeech_test.tsv",
"ljspeech_test.zip",
],
)
def set_up_sotasty_es_en(self):
self._set_up(
"sotasty_es_en",
"s2t/big/es-en",
[
"cfg_es_en.yaml",
"spm_bpe32768_es_en.model",
"spm_bpe32768_es_en.txt",
"sotasty_es_en_test_ted.tsv",
"sotasty_es_en_test_ted.zip",
],
)
def set_up_mustc_de_fbank(self):
self._set_up(
"mustc_de_fbank",
"https://dl.fbaipublicfiles.com/joint_speech_text_4_s2t/must_c/en_de",
[
"config.yaml",
"spm.model",
"dict.txt",
"src_dict.txt",
"tgt_dict.txt",
"tst-COMMON.tsv",
"tst-COMMON.zip",
],
)
def download_and_load_checkpoint(
self,
checkpoint_filename: str,
arg_overrides: Optional[Dict[str, str]] = None,
strict: bool = True,
):
path = self.download(self.base_url, self.root, checkpoint_filename)
_arg_overrides = arg_overrides or {}
_arg_overrides["data"] = self.root.as_posix()
models, cfg, task = load_model_ensemble_and_task(
[path.as_posix()], arg_overrides=_arg_overrides, strict=strict
)
if self.use_cuda:
for model in models:
model.cuda()
return models, cfg, task, self.build_generator(task, models, cfg)
def build_generator(
self,
task,
models,
cfg,
):
return task.build_generator(models, cfg)
@classmethod
def get_batch_iterator(cls, task, test_split, max_tokens, max_positions):
task.load_dataset(test_split)
return task.get_batch_iterator(
dataset=task.dataset(test_split),
max_tokens=max_tokens,
max_positions=max_positions,
num_workers=1,
).next_epoch_itr(shuffle=False)
@classmethod
def get_wer_scorer(
cls, tokenizer="none", lowercase=False, remove_punct=False, char_level=False
):
scorer_args = {
"wer_tokenizer": tokenizer,
"wer_lowercase": lowercase,
"wer_remove_punct": remove_punct,
"wer_char_level": char_level,
}
return WerScorer(Namespace(**scorer_args))
@classmethod
def get_bleu_scorer(cls, tokenizer="13a", lowercase=False, char_level=False):
scorer_args = {
"sacrebleu_tokenizer": tokenizer,
"sacrebleu_lowercase": lowercase,
"sacrebleu_char_level": char_level,
}
return SacrebleuScorer(Namespace(**scorer_args))
@torch.no_grad()
def base_test(
self,
ckpt_name,
reference_score,
score_delta=0.3,
dataset="librispeech_test-other",
max_tokens=65_536,
max_positions=(4_096, 1_024),
arg_overrides=None,
strict=True,
score_type="wer",
):
models, _, task, generator = self.download_and_load_checkpoint(
ckpt_name, arg_overrides=arg_overrides, strict=strict
)
if not self.use_cuda:
return
batch_iterator = self.get_batch_iterator(
task, dataset, max_tokens, max_positions
)
if score_type == "bleu":
scorer = self.get_bleu_scorer()
elif score_type == "wer":
scorer = self.get_wer_scorer()
else:
raise Exception(f"Unsupported score type {score_type}")
progress = tqdm(enumerate(batch_iterator), total=len(batch_iterator))
for batch_idx, sample in progress:
sample = utils.move_to_cuda(sample) if self.use_cuda else sample
hypo = task.inference_step(generator, models, sample)
for i, sample_id in enumerate(sample["id"].tolist()):
tgt_str, hypo_str = self.postprocess_tokens(
task,
sample["target"][i, :],
hypo[i][0]["tokens"].int().cpu(),
)
if batch_idx == 0 and i < 3:
print(f"T-{sample_id} {tgt_str}")
print(f"H-{sample_id} {hypo_str}")
scorer.add_string(tgt_str, hypo_str)
print(scorer.result_string() + f" (reference: {reference_score})")
self.assertAlmostEqual(scorer.score(), reference_score, delta=score_delta)
def postprocess_tokens(self, task, target, hypo_tokens):
tgt_tokens = utils.strip_pad(target, task.tgt_dict.pad()).int().cpu()
tgt_str = task.tgt_dict.string(tgt_tokens, "sentencepiece")
hypo_str = task.tgt_dict.string(hypo_tokens, "sentencepiece")
return tgt_str, hypo_str
def unzip_files(self, zip_file_name):
zip_file_path = self.root / zip_file_name
with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
            # str.strip(".zip") strips a set of characters rather than the
            # suffix, so use the path stem to name the extraction directory.
            zip_ref.extractall(self.root / Path(zip_file_name).stem)
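# Illustrative usage sketch for the scorer helpers above (added; the strings are
# made up, and only calls already exercised in base_test are used):
def _wer_scorer_smoke_check():
    scorer = TestFairseqSpeech.get_wer_scorer()
    scorer.add_string("a b c d", "a b x d")  # (reference, hypothesis)
    return scorer.score()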
| mit |
quole/gensim | gensim/test/test_word2vec.py | 6 | 37389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import tempfile
import itertools
import bz2
import sys
import numpy as np
from gensim import utils, matutils
from gensim.utils import check_output
from subprocess import PIPE
from gensim.models import word2vec, keyedvectors
from testfixtures import log_capture
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class LeeCorpus(object):
def __iter__(self):
with open(datapath('lee_background.cor')) as f:
for line in f:
yield utils.simple_preprocess(line)
list_corpus = list(LeeCorpus())
sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
new_sentences = [
['computer', 'artificial', 'intelligence'],
['artificial', 'trees'],
['human', 'intelligence'],
['artificial', 'graph'],
['intelligence'],
['artificial', 'intelligence', 'system']
]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_word2vec.tst')
def _rule(word, count, min_count):
if word == "human":
return utils.RULE_DISCARD # throw out
else:
return utils.RULE_DEFAULT # apply default rule, i.e. min_count
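# Sketch of the trim_rule contract assumed above (added; not used by any test
# here): the callable receives (word, count, min_count) and returns
# utils.RULE_DISCARD, utils.RULE_KEEP or utils.RULE_DEFAULT; e.g. always
# keeping one word regardless of its frequency:
def _keep_graph_rule(word, count, min_count):
    if word == "graph":
        return utils.RULE_KEEP
    return utils.RULE_DEFAULT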
def load_on_instance():
# Save and load a Word2Vec Model on instance for test
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
    model = word2vec.Word2Vec()
    return model.load(testfile())  # loading via an instance should raise AttributeError
class TestWord2VecModel(unittest.TestCase):
def testOnlineLearning(self):
"""Test that the algorithm is able to add new words to the
vocabulary and to a trained model when using a sorted vocabulary"""
model_hs = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=1, negative=0)
model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
self.assertTrue(len(model_hs.wv.vocab), 12)
self.assertTrue(model_hs.wv.vocab['graph'].count, 3)
model_hs.build_vocab(new_sentences, update=True)
model_neg.build_vocab(new_sentences, update=True)
self.assertTrue(model_hs.wv.vocab['graph'].count, 4)
self.assertTrue(model_hs.wv.vocab['artificial'].count, 4)
self.assertEqual(len(model_hs.wv.vocab), 14)
self.assertEqual(len(model_neg.wv.vocab), 14)
def testOnlineLearningAfterSave(self):
"""Test that the algorithm is able to add new words to the
vocabulary and to a trained model when using a sorted vocabulary"""
model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
model_neg.save(testfile())
model_neg = word2vec.Word2Vec.load(testfile())
self.assertTrue(len(model_neg.wv.vocab), 12)
model_neg.build_vocab(new_sentences, update=True)
model_neg.train(new_sentences, total_examples=model_neg.corpus_count, epochs=model_neg.iter)
self.assertEqual(len(model_neg.wv.vocab), 14)
def onlineSanity(self, model):
terro, others = [], []
for l in list_corpus:
if 'terrorism' in l:
terro.append(l)
else:
others.append(l)
self.assertTrue(all(['terrorism' not in l for l in others]))
model.build_vocab(others)
model.train(others, total_examples=model.corpus_count, epochs=model.iter)
self.assertFalse('terrorism' in model.wv.vocab)
model.build_vocab(terro, update=True)
self.assertTrue('terrorism' in model.wv.vocab)
orig0 = np.copy(model.wv.syn0)
model.train(terro, total_examples=len(terro), epochs=model.iter)
self.assertFalse(np.allclose(model.wv.syn0, orig0))
sim = model.n_similarity(['war'], ['terrorism'])
self.assertLess(0., sim)
def test_sg_hs_online(self):
"""Test skipgram w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=1, window=5, hs=1, negative=0, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_sg_neg_online(self):
"""Test skipgram w/ negative sampling"""
model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_cbow_hs_online(self):
"""Test CBOW w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_cbow_neg_online(self):
"""Test CBOW w/ negative sampling"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
min_count=5, iter=10, seed=42, workers=2, sample=0)
self.onlineSanity(model)
def testPersistence(self):
"""Test storing/loading the entire model."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
# test persistence of the KeyedVectors of a model
wv = model.wv
wv.save(testfile())
loaded_wv = keyedvectors.KeyedVectors.load(testfile())
self.assertTrue(np.allclose(wv.syn0, loaded_wv.syn0))
self.assertEqual(len(wv.vocab), len(loaded_wv.vocab))
def testPersistenceWithConstructorRule(self):
"""Test storing/loading the entire model with a vocab trimming rule passed in the constructor."""
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=_rule)
model.save(testfile())
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
def testRuleWithMinCount(self):
"""Test that returning RULE_DEFAULT from trim_rule triggers min_count."""
model = word2vec.Word2Vec(sentences + [["occurs_only_once"]], min_count=2, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
self.assertTrue("occurs_only_once" not in model.wv.vocab)
self.assertTrue("interface" in model.wv.vocab)
def testRule(self):
"""Test applying vocab trim_rule to build_vocab instead of constructor."""
model = word2vec.Word2Vec(min_count=1)
model.build_vocab(sentences, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
def testLambdaRule(self):
"""Test that lambda trim_rule works."""
rule = lambda word, count, min_count: utils.RULE_DISCARD if word == "human" else utils.RULE_DEFAULT
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=rule)
self.assertTrue("human" not in model.wv.vocab)
def testSyn0NormNotSaved(self):
"""Test syn0norm isn't saved in model file"""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.save(testfile())
loaded_model = word2vec.Word2Vec.load(testfile())
self.assertTrue(loaded_model.wv.syn0norm is None)
wv = model.wv
wv.save(testfile())
loaded_kv = keyedvectors.KeyedVectors.load(testfile())
self.assertTrue(loaded_kv.syn0norm is None)
def testLoadPreKeyedVectorModel(self):
"""Test loading pre-KeyedVectors word2vec model"""
if sys.version_info[:2] == (3,4):
model_file_suffix = '_py3_4'
elif sys.version_info < (3,):
model_file_suffix = '_py2'
else:
model_file_suffix = '_py3'
# Model stored in one file
model_file = 'word2vec_pre_kv%s' % model_file_suffix
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
# Model stored in multiple files
model_file = 'word2vec_pre_kv_sep%s' % model_file_suffix
model = word2vec.Word2Vec.load(datapath(model_file))
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
def testLoadPreKeyedVectorModelCFormat(self):
"""Test loading pre-KeyedVectors word2vec model saved in word2vec format"""
model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
self.assertTrue(model.syn0.shape[0] == len(model.vocab))
def testPersistenceWord2VecFormat(self):
"""Test storing/loading the entire model in word2vec format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(testfile(), binary=True)
binary_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
binary_model_kv.init_sims(replace=False)
self.assertTrue(np.allclose(model['human'], binary_model_kv['human']))
norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
norm_only_model.init_sims(replace=True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human']))
self.assertTrue(np.allclose(model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human']))
limited_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True, limit=3)
self.assertEquals(len(limited_model_kv.syn0), 3)
half_precision_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True, datatype=np.float16)
self.assertEquals(binary_model_kv.syn0.nbytes, half_precision_model_kv.syn0.nbytes * 2)
def testNoTrainingCFormat(self):
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(testfile(), binary=True)
kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
binary_model = word2vec.Word2Vec()
binary_model.wv = kv
self.assertRaises(ValueError, binary_model.train, sentences)
def testTooShortBinaryWord2VecFormat(self):
tfile = testfile()
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(tfile, binary=True)
f = open(tfile, 'r+b')
f.write(b'13') # write wrong (too-long) vector count
f.close()
self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=True)
def testTooShortTextWord2VecFormat(self):
tfile = testfile()
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(tfile, binary=False)
f = open(tfile, 'r+b')
f.write(b'13') # write wrong (too-long) vector count
f.close()
self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=False)
def testPersistenceWord2VecFormatNonBinary(self):
"""Test storing/loading the entire model in word2vec non-binary format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.wv.save_word2vec_format(testfile(), binary=False)
text_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=False)
text_model.init_sims(False)
self.assertTrue(np.allclose(model['human'], text_model['human'], atol=1e-6))
norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=False)
norm_only_model.init_sims(True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human'], atol=1e-6))
self.assertTrue(np.allclose(model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human'], atol=1e-4))
def testPersistenceWord2VecFormatWithVocab(self):
"""Test storing/loading the entire model and vocabulary in word2vec format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
self.assertEqual(model.wv.vocab['human'].count, binary_model_with_vocab_kv.vocab['human'].count)
def testPersistenceKeyedVectorsFormatWithVocab(self):
"""Test storing/loading the entire model and vocabulary in word2vec format."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
kv_binary_model_with_vocab = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
self.assertEqual(model.wv.vocab['human'].count, kv_binary_model_with_vocab.vocab['human'].count)
def testPersistenceWord2VecFormatCombinationWithStandardPersistence(self):
"""Test storing/loading the entire model and vocabulary in word2vec format chained with
saving and loading via `save` and `load` methods`.
It was possible prior to 1.0.0 release, now raises Exception"""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
binary_model_with_vocab_kv.save(testfile())
self.assertRaises(AttributeError, word2vec.Word2Vec.load, testfile())
def testLargeMmap(self):
"""Test storing/loading the entire model."""
model = word2vec.Word2Vec(sentences, min_count=1)
# test storing the internal arrays into separate files
model.save(testfile(), sep_limit=0)
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
# make sure mmaping the arrays back works, too
self.models_equal(model, word2vec.Word2Vec.load(testfile(), mmap='r'))
def testVocab(self):
"""Test word2vec vocabulary building."""
corpus = LeeCorpus()
total_words = sum(len(sentence) for sentence in corpus)
# try vocab building explicitly, using all words
model = word2vec.Word2Vec(min_count=1, hs=1, negative=0)
model.build_vocab(corpus)
self.assertTrue(len(model.wv.vocab) == 6981)
# with min_count=1, we're not throwing away anything, so make sure the word counts add up to be the entire corpus
self.assertEqual(sum(v.count for v in model.wv.vocab.values()), total_words)
# make sure the binary codes are correct
np.allclose(model.wv.vocab['the'].code, [1, 1, 0, 0])
# test building vocab with default params
model = word2vec.Word2Vec(hs=1, negative=0)
model.build_vocab(corpus)
self.assertTrue(len(model.wv.vocab) == 1750)
np.allclose(model.wv.vocab['the'].code, [1, 1, 1, 0])
# no input => "RuntimeError: you must first build vocabulary before training the model"
self.assertRaises(RuntimeError, word2vec.Word2Vec, [])
# input not empty, but rather completely filtered out
self.assertRaises(RuntimeError, word2vec.Word2Vec, corpus, min_count=total_words+1)
def testTraining(self):
"""Test word2vec training."""
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, hs=1, negative=0)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
self.models_equal(model, model2)
def testScoring(self):
"""Test word2vec scoring."""
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
# just score and make sure they exist
scores = model.score(sentences, len(sentences))
self.assertEqual(len(scores), len(sentences))
def testLocking(self):
"""Test word2vec training doesn't change locked vectors."""
corpus = LeeCorpus()
# build vocabulary, don't train yet
for sg in range(2): # test both cbow and sg
model = word2vec.Word2Vec(size=4, hs=1, negative=5, min_count=1, sg=sg, window=5)
model.build_vocab(corpus)
# remember two vectors
locked0 = np.copy(model.wv.syn0[0])
unlocked1 = np.copy(model.wv.syn0[1])
# lock the vector in slot 0 against change
model.syn0_lockf[0] = 0.0
model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
self.assertFalse((unlocked1 == model.wv.syn0[1]).all()) # unlocked vector should vary
self.assertTrue((locked0 == model.wv.syn0[0]).all()) # locked vector should not vary
def testAccuracy(self):
"""Test Word2Vec accuracy and KeyedVectors accuracy give the same result"""
model = word2vec.Word2Vec(LeeCorpus())
w2v_accuracy = model.accuracy(datapath('questions-words.txt'))
kv_accuracy = model.wv.accuracy(datapath('questions-words.txt'))
self.assertEqual(w2v_accuracy, kv_accuracy)
def testEvaluateWordPairs(self):
"""Test Spearman and Pearson correlation coefficients give sane results on similarity datasets"""
corpus = word2vec.LineSentence(datapath('head500.noblanks.cor.bz2'))
model = word2vec.Word2Vec(corpus, min_count=3, iter=10)
correlation = model.evaluate_word_pairs(datapath('wordsim353.tsv'))
pearson = correlation[0][0]
spearman = correlation[1][0]
oov = correlation[2]
self.assertTrue(0.1 < pearson < 1.0)
self.assertTrue(0.1 < spearman < 1.0)
self.assertTrue(0.0 <= oov < 90.0)
def model_sanity(self, model, train=True):
"""Even tiny models trained on LeeCorpus should pass these sanity checks"""
# run extra before/after training tests if train=True
if train:
model.build_vocab(list_corpus)
orig0 = np.copy(model.wv.syn0[0])
model.train(list_corpus, total_examples=model.corpus_count, epochs=model.iter)
self.assertFalse((orig0 == model.wv.syn0[1]).all()) # vector should vary after training
sims = model.most_similar('war', topn=len(model.wv.index2word))
t_rank = [word for word, score in sims].index('terrorism')
# in >200 calibration runs w/ calling parameters, 'terrorism' in 50-most_sim for 'war'
self.assertLess(t_rank, 50)
war_vec = model['war']
sims2 = model.most_similar([war_vec], topn=51)
self.assertTrue('war' in [word for word, score in sims2])
self.assertTrue('terrorism' in [word for word, score in sims2])
def test_sg_hs(self):
"""Test skipgram w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, iter=10, workers=2)
self.model_sanity(model)
def test_sg_neg(self):
"""Test skipgram w/ negative sampling"""
model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=5, iter=10, workers=2)
self.model_sanity(model)
def test_cbow_hs(self):
"""Test CBOW w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=8, hs=1, negative=0,
min_count=5, iter=10, workers=2, batch_words=1000)
self.model_sanity(model)
def test_cbow_neg(self):
"""Test CBOW w/ negative sampling"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
min_count=5, iter=10, workers=2, sample=0)
self.model_sanity(model)
def test_cosmul(self):
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
sims = model.most_similar_cosmul('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar_cosmul(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
def testTrainingCbow(self):
"""Test CBOW word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=1, negative=0)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=1, negative=0)
self.models_equal(model, model2)
def testTrainingSgNegative(self):
"""Test skip-gram (negative sampling) word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=1, hs=0, negative=2)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=1, hs=0, negative=2)
self.models_equal(model, model2)
def testTrainingCbowNegative(self):
"""Test CBOW (negative sampling) word2vec training."""
# to test training, make the corpus larger by repeating its sentences over and over
# build vocabulary, don't train yet
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
model.build_vocab(sentences)
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=0, negative=2)
self.models_equal(model, model2)
def testSimilarities(self):
"""Test similarity and n_similarity methods."""
# The model is trained using CBOW
model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
model.build_vocab(sentences)
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
self.assertTrue(model.n_similarity(['graph', 'trees'], ['trees', 'graph']))
self.assertTrue(model.n_similarity(['graph'], ['trees']) == model.similarity('graph', 'trees'))
self.assertRaises(ZeroDivisionError, model.n_similarity, ['graph', 'trees'], [])
self.assertRaises(ZeroDivisionError, model.n_similarity, [], ['graph', 'trees'])
self.assertRaises(ZeroDivisionError, model.n_similarity, [], [])
def testSimilarBy(self):
"""Test word2vec similar_by_word and similar_by_vector."""
model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
wordsims = model.similar_by_word('graph', topn=10)
wordsims2 = model.most_similar(positive='graph', topn=10)
vectorsims = model.similar_by_vector(model['graph'], topn=10)
vectorsims2 = model.most_similar([model['graph']], topn=10)
self.assertEqual(wordsims, wordsims2)
self.assertEqual(vectorsims, vectorsims2)
def testParallel(self):
"""Test word2vec parallel training."""
if word2vec.FAST_VERSION < 0: # don't test the plain np version for parallelism (too slow)
return
corpus = utils.RepeatCorpus(LeeCorpus(), 10000)
for workers in [2, 4]:
model = word2vec.Word2Vec(corpus, workers=workers)
sims = model.most_similar('israeli')
# the exact vectors and therefore similarities may differ, due to different thread collisions/randomization
# so let's test only for top3
# TODO: commented out for now; find a more robust way to compare against "gold standard"
# self.assertTrue('palestinian' in [sims[i][0] for i in range(3)])
def testRNG(self):
"""Test word2vec results identical with identical RNG seed."""
model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
model2 = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
self.models_equal(model, model2)
def models_equal(self, model, model2):
self.assertEqual(len(model.wv.vocab), len(model2.wv.vocab))
self.assertTrue(np.allclose(model.wv.syn0, model2.wv.syn0))
if model.hs:
self.assertTrue(np.allclose(model.syn1, model2.syn1))
if model.negative:
self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
most_common_word = max(model.wv.vocab.items(), key=lambda item: item[1].count)[0]
self.assertTrue(np.allclose(model[most_common_word], model2[most_common_word]))
def testDeleteTemporaryTrainingData(self):
"""Test word2vec model after delete_temporary_training_data"""
for i in [0, 1]:
for j in [0, 1]:
model = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=i, negative=j)
if i:
self.assertTrue(hasattr(model, 'syn1'))
if j:
self.assertTrue(hasattr(model, 'syn1neg'))
self.assertTrue(hasattr(model, 'syn0_lockf'))
model.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
self.assertTrue(len(model['human']), 10)
self.assertTrue(len(model.wv.vocab), 12)
self.assertTrue(model.wv.vocab['graph'].count, 3)
self.assertTrue(not hasattr(model, 'syn1'))
self.assertTrue(not hasattr(model, 'syn1neg'))
self.assertTrue(not hasattr(model, 'syn0_lockf'))
def testNormalizeAfterTrainingData(self):
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
norm_only_model = word2vec.Word2Vec.load(testfile())
norm_only_model.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
self.assertFalse(np.allclose(model['human'], norm_only_model['human']))
def testPredictOutputWord(self):
'''Test word2vec predict_output_word method handling for negative sampling scheme'''
#under normal circumstances
model_with_neg = word2vec.Word2Vec(sentences, min_count=1)
predictions_with_neg = model_with_neg.predict_output_word(['system', 'human'], topn=5)
self.assertTrue(len(predictions_with_neg)==5)
        # out-of-vocabulary scenario
predictions_out_of_vocab = model_with_neg.predict_output_word(['some', 'random', 'words'], topn=5)
self.assertEqual(predictions_out_of_vocab, None)
#when required model parameters have been deleted
model_with_neg.init_sims()
model_with_neg.wv.save_word2vec_format(testfile(), binary=True)
kv_model_with_neg = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
binary_model_with_neg = word2vec.Word2Vec()
binary_model_with_neg.wv = kv_model_with_neg
self.assertRaises(RuntimeError, binary_model_with_neg.predict_output_word, ['system', 'human'])
#negative sampling scheme not used
model_without_neg = word2vec.Word2Vec(sentences, min_count=1, negative=0)
self.assertRaises(RuntimeError, model_without_neg.predict_output_word, ['system', 'human'])
@log_capture()
def testBuildVocabWarning(self, l):
"""Test if warning is raised on non-ideal input to a word2vec model"""
sentences = ['human', 'machine']
model = word2vec.Word2Vec()
model.build_vocab(sentences)
warning = "Each 'sentences' item should be a list of words (usually unicode strings)."
self.assertTrue(warning in str(l))
@log_capture()
def testTrainWarning(self, l):
"""Test if warning is raised if alpha rises during subsequent calls to train()"""
sentences = [['human'],
['graph', 'trees']]
model = word2vec.Word2Vec(min_count=1)
model.build_vocab(sentences)
for epoch in range(10):
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
model.alpha -= 0.002
model.min_alpha = model.alpha
if epoch == 5:
model.alpha += 0.05
warning = "Effective 'alpha' higher than previous training cycles"
self.assertTrue(warning in str(l))
def test_train_with_explicit_param(self):
model = word2vec.Word2Vec(size=2, min_count=1, hs=1, negative=0)
model.build_vocab(sentences)
with self.assertRaises(ValueError):
model.train(sentences, total_examples=model.corpus_count)
with self.assertRaises(ValueError):
model.train(sentences, epochs=model.iter)
with self.assertRaises(ValueError):
model.train(sentences)
def test_sentences_should_not_be_a_generator(self):
"""
        Passing a generator as `sentences` should raise a TypeError.
"""
gen = (s for s in sentences)
self.assertRaises(TypeError, word2vec.Word2Vec, (gen,))
def testLoadOnClassError(self):
"""Test if exception is raised when loading word2vec model on instance"""
self.assertRaises(AttributeError, load_on_instance)
def test_reset_from(self):
"""Test if reset_from() uses pre-built structures from other model"""
model = word2vec.Word2Vec(sentences, min_count=1)
other_model = word2vec.Word2Vec(new_sentences, min_count=1)
other_vocab = other_model.wv.vocab
model.reset_from(other_model)
self.assertEqual(model.wv.vocab, other_vocab)
#endclass TestWord2VecModel
class TestWMD(unittest.TestCase):
def testNonzero(self):
'''Test basic functionality with a test sentence.'''
if not PYEMD_EXT:
return
model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
sentence1 = ['human', 'interface', 'computer']
sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
distance = model.wmdistance(sentence1, sentence2)
# Check that distance is non-zero.
self.assertFalse(distance == 0.0)
def testSymmetry(self):
'''Check that distance is symmetric.'''
if not PYEMD_EXT:
return
model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
sentence1 = ['human', 'interface', 'computer']
sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
distance1 = model.wmdistance(sentence1, sentence2)
distance2 = model.wmdistance(sentence2, sentence1)
self.assertTrue(np.allclose(distance1, distance2))
def testIdenticalSentences(self):
'''Check that the distance from a sentence to itself is zero.'''
if not PYEMD_EXT:
return
model = word2vec.Word2Vec(sentences, min_count=1)
sentence = ['survey', 'user', 'computer', 'system', 'response', 'time']
distance = model.wmdistance(sentence, sentence)
self.assertEqual(0.0, distance)
class TestWord2VecSentenceIterators(unittest.TestCase):
def testLineSentenceWorksWithFilename(self):
"""Does LineSentence work with a filename argument?"""
with utils.smart_open(datapath('lee_background.cor')) as orig:
sentences = word2vec.LineSentence(datapath('lee_background.cor'))
for words in sentences:
self.assertEqual(words, utils.to_unicode(orig.readline()).split())
def testLineSentenceWorksWithCompressedFile(self):
"""Does LineSentence work with a compressed file object argument?"""
with utils.smart_open(datapath('head500.noblanks.cor')) as orig:
sentences = word2vec.LineSentence(bz2.BZ2File(datapath('head500.noblanks.cor.bz2')))
for words in sentences:
self.assertEqual(words, utils.to_unicode(orig.readline()).split())
def testLineSentenceWorksWithNormalFile(self):
"""Does LineSentence work with a file object argument, rather than filename?"""
with utils.smart_open(datapath('head500.noblanks.cor')) as orig:
with utils.smart_open(datapath('head500.noblanks.cor')) as fin:
sentences = word2vec.LineSentence(fin)
for words in sentences:
self.assertEqual(words, utils.to_unicode(orig.readline()).split())
#endclass TestWord2VecSentenceIterators
# TODO: get correct path to Python binary
# class TestWord2VecScripts(unittest.TestCase):
# def testWord2VecStandAloneScript(self):
# """Does Word2Vec script launch standalone?"""
# cmd = 'python -m gensim.scripts.word2vec_standalone -train ' + datapath('testcorpus.txt') + ' -output vec.txt -size 200 -sample 1e-4 -binary 0 -iter 3 -min_count 1'
# output = check_output(cmd, stderr=PIPE)
# self.assertEqual(output, '0')
# #endclass TestWord2VecScripts
if not hasattr(TestWord2VecModel, 'assertLess'):
# workaround for python 2.6
def assertLess(self, a, b, msg=None):
self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
setattr(TestWord2VecModel, 'assertLess', assertLess)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.DEBUG)
logging.info("using optimization %s", word2vec.FAST_VERSION)
unittest.main()
| lgpl-2.1 |
anntzer/scikit-learn | sklearn/tests/test_kernel_approximation.py | 4 | 16064 | import re
import numpy as np
from scipy.sparse import csr_matrix
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.datasets import make_classification
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel, chi2_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
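# Note (added): rows of X and Y are rescaled to sum to one so they look like
# histograms, which keeps the chi2-based kernels exercised below well defined
# on non-negative inputs.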
@pytest.mark.parametrize("gamma", [0.1, 1, 2.5])
@pytest.mark.parametrize("degree, n_components", [(1, 500), (2, 500), (3, 5000)])
@pytest.mark.parametrize("coef0", [0, 2.5])
def test_polynomial_count_sketch(gamma, degree, coef0, n_components):
# test that PolynomialCountSketch approximates polynomial
# kernel on random data
# compute exact kernel
kernel = polynomial_kernel(X, Y, gamma=gamma, degree=degree, coef0=coef0)
# approximate kernel mapping
ps_transform = PolynomialCountSketch(
n_components=n_components,
gamma=gamma,
coef0=coef0,
degree=degree,
random_state=42,
)
X_trans = ps_transform.fit_transform(X)
Y_trans = ps_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert np.abs(np.mean(error)) <= 0.05 # close to unbiased
np.abs(error, out=error)
assert np.max(error) <= 0.1 # nothing too far off
assert np.mean(error) <= 0.05 # mean is fairly close
@pytest.mark.parametrize("gamma", [0.1, 1.0])
@pytest.mark.parametrize("degree", [1, 2, 3])
@pytest.mark.parametrize("coef0", [0, 2.5])
def test_polynomial_count_sketch_dense_sparse(gamma, degree, coef0):
"""Check that PolynomialCountSketch results are the same for dense and sparse
input.
"""
ps_dense = PolynomialCountSketch(
n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42
)
Xt_dense = ps_dense.fit_transform(X)
Yt_dense = ps_dense.transform(Y)
ps_sparse = PolynomialCountSketch(
n_components=500, gamma=gamma, degree=degree, coef0=coef0, random_state=42
)
Xt_sparse = ps_sparse.fit_transform(csr_matrix(X))
Yt_sparse = ps_sparse.transform(csr_matrix(Y))
assert_allclose(Xt_dense, Xt_sparse)
assert_allclose(Yt_dense, Yt_sparse)
def _linear_kernel(X, Y):
return np.dot(X, Y.T)
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = large_kernel.sum(axis=2)
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
msg = "Negative values in data passed to"
with pytest.raises(ValueError, match=msg):
transform.transform(Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
msg = re.escape(
"If sample_steps is not in [1, 2, 3], you need to provide sample_interval"
)
with pytest.raises(ValueError, match=msg):
transform.fit(X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert transform.sample_interval is None
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert transform.sample_interval_ is not None
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4, sample_interval=sample_interval)
assert transform.sample_interval == sample_interval
transform.fit(X)
assert transform.sample_interval_ == sample_interval
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
# set on negative component but greater than c to ensure that the kernel
# approximation is valid on the group (-c; +\infty) endowed with the skewed
# multiplication.
Y[0, 0] = -c / 2.0
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = (
(np.log(X_c) / 2.0) + (np.log(Y_c) / 2.0) + np.log(2.0) - np.log(X_c + Y_c)
)
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
assert np.isfinite(kernel).all(), "NaNs found in the Gram matrix"
assert np.isfinite(kernel_approx).all(), "NaNs found in the approximate Gram matrix"
# test error is raised on when inputs contains values smaller than -c
Y_neg = Y.copy()
Y_neg[0, 0] = -c * 2.0
msg = "X may not contain entries smaller than -skewedness"
with pytest.raises(ValueError, match=msg):
transform.transform(Y_neg)
def test_additive_chi2_sampler_exceptions():
"""Ensures correct error message"""
transformer = AdditiveChi2Sampler()
X_neg = X.copy()
X_neg[0, 0] = -1
with pytest.raises(ValueError, match="X in AdditiveChi2Sampler.fit"):
transformer.fit(X_neg)
with pytest.raises(ValueError, match="X in AdditiveChi2Sampler.transform"):
transformer.fit(X)
transformer.transform(X_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.0
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert np.abs(np.mean(error)) <= 0.01 # close to unbiased
np.abs(error, out=error)
assert np.max(error) <= 0.1 # nothing too far off
assert np.mean(error) <= 0.05 # mean is fairly close
def test_rbf_sampler_fitted_attributes_dtype(global_dtype):
"""Check that the fitted attributes are stored accordingly to the
data type of X."""
rbf = RBFSampler()
X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
rbf.fit(X)
assert rbf.random_offset_.dtype == global_dtype
assert rbf.random_weights_.dtype == global_dtype
def test_rbf_sampler_dtype_equivalence():
"""Check the equivalence of the results with 32 and 64 bits input."""
rbf32 = RBFSampler(random_state=42)
X32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
rbf32.fit(X32)
rbf64 = RBFSampler(random_state=42)
X64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
rbf64.fit(X64)
assert_allclose(rbf32.random_offset_, rbf64.random_offset_)
assert_allclose(rbf32.random_weights_, rbf64.random_weights_)
def test_skewed_chi2_sampler_fitted_attributes_dtype(global_dtype):
"""Check that the fitted attributes are stored accordingly to the
data type of X."""
skewed_chi2_sampler = SkewedChi2Sampler()
X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
skewed_chi2_sampler.fit(X)
assert skewed_chi2_sampler.random_offset_.dtype == global_dtype
assert skewed_chi2_sampler.random_weights_.dtype == global_dtype
def test_skewed_chi2_sampler_dtype_equivalence():
"""Check the equivalence of the results with 32 and 64 bits input."""
skewed_chi2_sampler_32 = SkewedChi2Sampler(random_state=42)
X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
skewed_chi2_sampler_32.fit(X_32)
skewed_chi2_sampler_64 = SkewedChi2Sampler(random_state=42)
X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
skewed_chi2_sampler_64.fit(X_64)
assert_allclose(
skewed_chi2_sampler_32.random_offset_, skewed_chi2_sampler_64.random_offset_
)
assert_allclose(
skewed_chi2_sampler_32.random_weights_, skewed_chi2_sampler_64.random_weights_
)
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert X_transformed.shape == (X.shape[0], 2)
# test callable kernel
trans = Nystroem(n_components=2, kernel=_linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert X_transformed.shape == (X.shape[0], 2)
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert X_transformed.shape == (X.shape[0], 2)
def test_nystroem_default_parameters():
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(10, 4))
# rbf kernel should behave as gamma=None by default
# aka gamma = 1 / n_features
nystroem = Nystroem(n_components=10)
X_transformed = nystroem.fit_transform(X)
K = rbf_kernel(X, gamma=None)
K2 = np.dot(X_transformed, X_transformed.T)
assert_array_almost_equal(K, K2)
# chi2 kernel should behave as gamma=1 by default
nystroem = Nystroem(kernel="chi2", n_components=10)
X_transformed = nystroem.fit_transform(X)
K = chi2_kernel(X, gamma=1)
K2 = np.dot(X_transformed, X_transformed.T)
assert_array_almost_equal(K, K2)
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert np.all(np.isfinite(X_transformed))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=0.1)
nystroem = Nystroem(
kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=0.1
)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(
kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={"log": kernel_log},
).fit(X)
assert len(kernel_log) == n_samples * (n_samples - 1) / 2
# if degree, gamma or coef0 is passed, we raise a ValueError
msg = "Don't pass gamma, coef0 or degree to Nystroem"
params = ({"gamma": 1}, {"coef0": 1}, {"degree": 2})
for param in params:
ny = Nystroem(kernel=_linear_kernel, n_components=(n_samples - 1), **param)
with pytest.raises(ValueError, match=msg):
ny.fit(X)
def test_nystroem_precomputed_kernel():
# Non-regression: test Nystroem on precomputed kernel.
# PR - 14706
rnd = np.random.RandomState(12)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=2, coef0=0.1)
nystroem = Nystroem(kernel="precomputed", n_components=X.shape[0])
X_transformed = nystroem.fit_transform(K)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
# if degree, gamma or coef0 is passed, we raise a ValueError
msg = "Don't pass gamma, coef0 or degree to Nystroem"
params = ({"gamma": 1}, {"coef0": 1}, {"degree": 2})
for param in params:
ny = Nystroem(kernel="precomputed", n_components=X.shape[0], **param)
with pytest.raises(ValueError, match=msg):
ny.fit(K)
def test_nystroem_component_indices():
"""Check that `component_indices_` corresponds to the subset of
training points used to construct the feature map.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20474
"""
X, _ = make_classification(n_samples=100, n_features=20)
feature_map_nystroem = Nystroem(
n_components=10,
random_state=0,
)
feature_map_nystroem.fit(X)
assert feature_map_nystroem.component_indices_.shape == (10,)
@pytest.mark.parametrize(
"Estimator", [PolynomialCountSketch, RBFSampler, SkewedChi2Sampler, Nystroem]
)
def test_get_feature_names_out(Estimator):
"""Check get_feature_names_out"""
est = Estimator().fit(X)
X_trans = est.transform(X)
names_out = est.get_feature_names_out()
class_name = Estimator.__name__.lower()
expected_names = [f"{class_name}{i}" for i in range(X_trans.shape[1])]
assert_array_equal(names_out, expected_names)
def test_additivechi2sampler_get_feature_names_out():
"""Check get_feature_names_out for AdditiveChi2Sampler."""
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 3))
chi2_sampler = AdditiveChi2Sampler(sample_steps=3).fit(X)
input_names = ["f0", "f1", "f2"]
suffixes = [
"f0_sqrt",
"f1_sqrt",
"f2_sqrt",
"f0_cos1",
"f1_cos1",
"f2_cos1",
"f0_sin1",
"f1_sin1",
"f2_sin1",
"f0_cos2",
"f1_cos2",
"f2_cos2",
"f0_sin2",
"f1_sin2",
"f2_sin2",
]
names_out = chi2_sampler.get_feature_names_out(input_features=input_names)
expected_names = [f"additivechi2sampler_{suffix}" for suffix in suffixes]
assert_array_equal(names_out, expected_names)
| bsd-3-clause |
agartland/utils | mysuperlearner.py | 1 | 12884 | import numpy as np
import pandas as pd
import itertools
from copy import deepcopy
from scipy import optimize
import sklearn
from sklearn import metrics
from sklearn import linear_model
from sklearn.model_selection import StratifiedKFold
__all__ = ['SuperLearner',
'SuperLearnerCV',
'binary_classification_score_wrapper']
"""TODO:
- add NNLS LinearRegression(positive=True) with log link for LogisticRegression"""
""""SuperLearner with sklearn"""
def binary_classification_score_wrapper(metric, **wrapper_kwargs):
    def wrapped(y_true, y_pred, **call_kwargs):
        # Keyword arguments bound at wrap time can be overridden per call
        return metric(y_true, np.round(y_pred), **{**wrapper_kwargs, **call_kwargs})
return wrapped
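# Usage sketch (illustrative; the arrays below are ad-hoc examples, not module data):
# wrapping a hard-label metric lets it score the probabilistic predictions emitted
# by the base learners and the meta-learner, e.g.
#   accuracy = binary_classification_score_wrapper(sklearn.metrics.accuracy_score)
#   accuracy(np.array([0, 1, 1]), np.array([0.2, 0.8, 0.4]))  # rounds to [0, 1, 0] -> 2/3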
class SuperLearnerCV:
def __init__(self, learners, meta_learner=None, inner_cv=None, outer_cv=None, scorers=[]):
self.learners = learners
if meta_learner is None:
self.meta_learner = AUCMinimizer()
else:
self.meta_learner = meta_learner
if inner_cv is None:
self.inner_cv = StratifiedKFold(n_splits=5)
elif np.isscalar(inner_cv):
self.inner_cv = StratifiedKFold(n_splits=inner_cv)
else:
self.inner_cv = inner_cv
if outer_cv is None:
            # StratifiedKFold requires n_splits >= 2, so use 2 as the minimal default
            self.outer_cv = StratifiedKFold(n_splits=2)
elif np.isscalar(outer_cv):
self.outer_cv = StratifiedKFold(n_splits=outer_cv)
else:
self.outer_cv = outer_cv
self.scorers = scorers
self.sl_mod = SuperLearner(learners=self.learners,
meta_learner=self.meta_learner,
cv=self.inner_cv,
scorers=self.scorers)
    def fit_cv(self, X, y, subsets=None):
if subsets is None:
self.subsets = [('all', X.columns)]
else:
self.subsets = subsets
n_splits = self.outer_cv.n_splits
scores = np.zeros((n_splits, len(self.scorers)))
for i, (train_idxs, test_idxs) in enumerate(self.outer_cv.split(X, y)):
X_train, X_test = X.iloc[train_idxs], X.iloc[test_idxs]
y_train, y_test = y.iloc[train_idxs], y.iloc[test_idxs]
self.sl_mod.fit(X_train, y_train, subsets)
yhat_test = self.sl_mod.predict(X_test)
for score_i, (scorer_name, scorer) in enumerate(self.scorers):
scores[i, score_i] = scorer(y_test, yhat_test)
self.scores = pd.DataFrame(scores, index=range(n_splits), columns=[s[0] for s in self.scorers])
        """Refit on the full dataset (not just the last training fold) before returning"""
        return self.sl_mod.fit(X, y, subsets)
    def fit(self, X, y, subsets=None):
if subsets is None:
self.subsets = [('all', X.columns)]
else:
self.subsets = subsets
return self.sl_mod.fit(X, y, subsets)
    def predict(self, X, y=None):
        yhat = self.sl_mod.predict(X)
        if y is not None:
            """Scores can only be computed when true labels are provided"""
            scores = np.zeros(len(self.scorers))
            for score_i, (scorer_name, scorer) in enumerate(self.scorers):
                scores[score_i] = scorer(y, yhat)
            self.scores = pd.Series(scores, index=[s[0] for s in self.scorers])
        return yhat
class SuperLearner:
def __init__(self, learners, meta_learner=None, cv=None, scorers=[]):
self.learners = learners
if meta_learner is None:
"""QUESTION: Can I use linear regression for the SL? If so, could use positive=True for NNLS)
TODO: Add Nelder-Mean for binary outcome and AUC-ROC loss"""
self.meta_learner = linear_model.LogisticRegression()
else:
self.meta_learner = meta_learner
if cv is None:
self.cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=110820)
elif np.isscalar(cv):
self.cv = StratifiedKFold(n_splits=cv, shuffle=True, random_state=110820)
else:
self.cv = cv
"""Scorers expected to be list of tuples [('auc', sklearn.metrics.roc_auc), ...]"""
self.scorers = scorers
self.frozen_learners = {}
self.scores = None
def get_learner_labels(self, as_str=True):
labels = []
for j, ((name, mod), (ss_name, ss)) in enumerate(itertools.product(self.learners, self.subsets)):
if as_str:
labels.append(f'{name} [{ss_name}]')
else:
labels.append((name, ss_name))
return labels
def iter_learners(self):
for j, ((name, mod), (ss_name, ss)) in enumerate(itertools.product(self.learners, self.subsets)):
yield j, name, mod, ss_name, ss
def fit(self, X, y, subsets=None):
"""Will fit the meta_learner on test data from a CV loop
and store the frozen learners in self.frozen_learners"""
if subsets is None:
self.subsets = [('all', X.columns)]
else:
self.subsets = subsets
n_splits = self.cv.n_splits
n_learners = len(self.learners) * len(self.subsets)
"""For each of the learners in cross-validation, generating training data for the meta-learner"""
X_meta = np.zeros((X.shape[0], n_learners))
y_meta = np.zeros(X.shape[0])
scores = np.zeros((n_splits, n_learners, len(self.scorers)))
data_i = 0
"""================CROSS-VALIDATION LOOP==============="""
for i, (train_idxs, test_idxs) in enumerate(self.cv.split(X, y)):
X_train, X_test = X.iloc[train_idxs], X.iloc[test_idxs]
y_train, y_test = y.iloc[train_idxs], y.iloc[test_idxs]
"""y_test becomes y_meta and yhat_test becomes X_meta"""
y_meta[data_i : data_i + len(y_test)] = y_test
for learner_i, learner_name, mod, ss_name, ss_cols in self.iter_learners():
mod.fit(X_train[ss_cols], y_train)
"""Use column 1 which contains probability of the indicator = 1 value"""
yhat_test = mod.predict_proba(X_test[ss_cols])[:, 1]
"""Using the predicted probabilities from each learner"""
X_meta[data_i : data_i + len(y_test), learner_i] = yhat_test
for score_i, (scorer_name, scorer) in enumerate(self.scorers):
scores[i, learner_i, score_i] = scorer(y_test, yhat_test)
data_i += len(y_test)
"""Fit the meta/super-learner to the inner CV predictions"""
"""X_meta are predicted probabilities via the learners using test data
y_meta are observed binary class labels fom the data"""
self.X_meta = X_meta
self.y_meta = y_meta
self.meta_learner.fit(X_meta, y_meta)
"""Fit each of the learners to all the data and store"""
for learner_i, learner_name, mod, ss_name, ss_cols in self.iter_learners():
self.frozen_learners[(learner_name, ss_name)] = deepcopy(mod.fit(X[ss_cols], y))
index = pd.MultiIndex.from_tuples(self.get_learner_labels(as_str=False),
names=['Learner', 'Subset'])
tmpl = []
for score_i, (scorer_name, scorer) in enumerate(self.scorers):
tmp = pd.DataFrame(scores[:, :, score_i].T, index=index)
tmp = tmp.assign(scorer=scorer_name).set_index('scorer', append=True)
tmpl.append(tmp)
self.scores = pd.concat(tmpl, axis=0)
return self
def predict(self, X):
"""Use the frozen fitted base-learners to predict on the full dataset"""
n_learners = len(self.learners) * len(self.subsets)
X_meta_full = np.zeros((X.shape[0], n_learners))
for learner_i, learner_name, mod, ss_name, ss_cols in self.iter_learners():
yhat = self.frozen_learners[(learner_name, ss_name)].predict_proba(X[ss_cols])[:, 1]
X_meta_full[:, learner_i] = yhat
"""Finally, use the fitted meta-learner to predict the labels"""
yhat_full = self.meta_learner.predict_proba(X_meta_full)[:, 1]
return yhat_full
def evaluator(self, X, y):
n_learners = len(self.learners) * len(self.subsets)
scores = np.zeros((n_learners, len(self.scorers)))
"""Use the frozen fitted base-learners to predict and produce scores for each learner (not CV)"""
for learner_i, learner_name, mod, ss_name, ss_cols in self.iter_learners():
yhat = self.frozen_learners[(learner_name, ss_name)].predict_proba(X[ss_cols])[:, 1]
#print(learner_name)
#print(yhat)
#print(yhat.round())
for score_i, (scorer_name, scorer) in enumerate(self.scorers):
scores[learner_i, score_i] = scorer(y, yhat)
index = pd.MultiIndex.from_tuples(self.get_learner_labels(as_str=False),
names=['Learner', 'Subset'])
return pd.DataFrame(scores, index=index, columns=[s[0] for s in self.scorers])
def evaluator_cv(self, X, y):
n_splits = self.cv.n_splits
n_learners = len(self.learners) * len(self.subsets)
scores = np.zeros((n_splits, n_learners, len(self.scorers)))
data_i = 0
"""================CROSS-VALIDATION LOOP==============="""
for i, (train_idxs, test_idxs) in enumerate(self.cv.split(X, y)):
X_train, X_test = X.iloc[train_idxs], X.iloc[test_idxs]
y_train, y_test = y.iloc[train_idxs], y.iloc[test_idxs]
#print(np.sum(y_train), np.sum(y_test))
#print(X.mean())
for learner_i, learner_name, mod, ss_name, ss_cols in self.iter_learners():
mod.fit(X_train[ss_cols], y_train)
"""Use column 1 which contains probability of the indicator = 1 value"""
yhat_test = mod.predict_proba(X_test[ss_cols])[:, 1]
for score_i, (scorer_name, scorer) in enumerate(self.scorers):
scores[i, learner_i, score_i] = scorer(y_test, yhat_test)
index = pd.MultiIndex.from_tuples(self.get_learner_labels(as_str=False),
names=['Learner', 'Subset'])
tmpl = []
for score_i, (scorer_name, scorer) in enumerate(self.scorers):
tmp = pd.DataFrame(scores[:, :, score_i].T, index=index)
tmp = tmp.assign(scorer=scorer_name).set_index('scorer', append=True)
tmpl.append(tmp)
return pd.concat(tmpl, axis=0)
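# Usage sketch (illustrative; `X_df` / `y_ser` stand for a pandas DataFrame of features
# and a binary label Series, which are assumptions rather than objects in this module):
#   learners = [('l1_logreg', linear_model.LogisticRegression(penalty='l1', solver='liblinear')),
#               ('l2_logreg', linear_model.LogisticRegression(C=0.5))]
#   scorers = [('auc', sklearn.metrics.roc_auc_score),
#              ('acc', binary_classification_score_wrapper(sklearn.metrics.accuracy_score))]
#   sl = SuperLearner(learners, meta_learner=AUCMinimizer(), cv=5, scorers=scorers)
#   sl.fit(X_df, y_ser)        # inner CV builds X_meta/y_meta, then freezes each learner
#   yhat = sl.predict(X_df)    # blended probability of the positive class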
class AUCMinimizer():
"""Use Nelder-Mead optimization and AUC loss.
Translated directly from R SuperLearner package.
Uses Nelder-Mead on one coef per learner, constrained to be positive.
https://github.com/ecpolley/SuperLearner/blob/ac1aa02fc8b92d4044949102df8eeea4952da753/R/method.R#L359"""
def __init__(self, maxiter=1000, disp=False):
self.coef = None
self.auc_i = None
self.auc = None
self.optim = None
self.disp = disp
self.maxiter = maxiter
@staticmethod
def _auc_diagnostic(X_data, y_data):
auc = np.zeros(X_data.shape[1])
for i in range(X_data.shape[1]):
auc[i] = roc_auc_np(y_data, X_data[:, i])
return auc
@staticmethod
def _auc_loss(x, X_data, y_data):
auc = roc_auc_np(y_data, np.dot(X_data, x))
return 1 - auc
def fit(self, X, y):
n_learners = X.shape[1]
bounds = [(0, 1)] * n_learners
options = dict(maxiter=self.maxiter, disp=self.disp, return_all=True, xatol=0.0001, fatol=0.0001)
res = optimize.minimize(fun=self._auc_loss,
x0=np.ones(n_learners)/n_learners,
                                args=(X, y),
method='Nelder-Mead',
bounds=bounds,
callback=None,
options=options)
self.coef = res.x / np.sum(res.x)
self.optim = res
self.auc = 1 - self._auc_loss(res.x, X, y)
self.auc_i = self._auc_diagnostic(X, y)
return self
def predict_proba(self, X):
tmp = np.dot(X, self.coef)
return np.concatenate((1 - tmp[:, None], tmp[:, None]), axis=1)
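    # NOTE: returning an (n_samples, 2) array mirrors the sklearn predict_proba
    # contract, so SuperLearner.predict can take column 1 as the positive-class
    # probability regardless of which meta-learner is plugged in.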
def roc_auc_np(y_true, y_prob):
y_true = np.asarray(y_true)
y_true = y_true[np.argsort(y_prob)]
nfalse = 0
auc = 0
n = len(y_true)
nfalse = np.cumsum(1 - y_true)
auc = np.cumsum(y_true * nfalse)
auc = auc[-1] / (nfalse[-1] * (n - nfalse[-1]))
return auc
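# Sanity check (illustrative): with untied scores this matches sklearn, e.g.
#   y = np.array([0, 0, 1, 1]); p = np.array([0.1, 0.4, 0.35, 0.8])
#   roc_auc_np(y, p)   # -> 0.75, same as metrics.roc_auc_score(y, p)
# Note: the rank-based shortcut assumes no tied probabilities (ties are not averaged).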
class nnls_logistic_regression(linear_model.LinearRegression):
    def __init__(self):
        super().__init__(fit_intercept=False, positive=True)
def fit(self, X, y):
"""THIS WON"T WORK AS A SIMPLE WRAPPER: MAYBE CHECKOUT FIT CODE
TO APPLY LOGIT TO RHS"""
def predict_proba(self, X):
yhat = super().predict(X)
return yhat
@staticmethod
def logit(p):
return np.log(p / (1 - p))
@staticmethod
def inv_logit(y):
        return 1 / (1 + np.exp(-y))
| mit |
coderbone/SickRage | lib/guessit/transfo/guess_movie_title_from_position.py | 28 | 8579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import found_property
from guessit import u
from guessit.patterns.list import all_separators
from guessit.language import all_lang_prefixes_suffixes
class GuessMovieTitleFromPosition(Transformer):
def __init__(self):
Transformer.__init__(self, -200)
def supported_properties(self):
return ['title']
def should_process(self, mtree, options=None):
options = options or {}
return not options.get('skip_title') and not mtree.guess.get('type', '').startswith('episode')
@staticmethod
def excluded_word(*values):
for value in values:
if value.clean_value.lower() in all_separators + all_lang_prefixes_suffixes:
return True
return False
def process(self, mtree, options=None):
"""
try to identify the remaining unknown groups by looking at their
position relative to other known elements
"""
if 'title' in mtree.info:
return
path_nodes = list(filter(lambda x: x.category == 'path', mtree.nodes()))
basename = path_nodes[-2]
all_valid = lambda leaf: len(leaf.clean_value) > 0
basename_leftover = list(basename.unidentified_leaves(valid=all_valid))
try:
folder = path_nodes[-3]
folder_leftover = list(folder.unidentified_leaves())
except IndexError:
folder = None
folder_leftover = []
self.log.debug('folder: %s' % u(folder_leftover))
self.log.debug('basename: %s' % u(basename_leftover))
# specific cases:
# if we find the same group both in the folder name and the filename,
# it's a good candidate for title
if (folder_leftover and basename_leftover and
folder_leftover[0].clean_value == basename_leftover[0].clean_value and
not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0])):
found_property(folder_leftover[0], 'title', confidence=0.8)
return
# specific cases:
# if the basename contains a number first followed by an unidentified
# group, and the folder only contains 1 unidentified one, then we have
# a series
# ex: Millenium Trilogy (2009)/(1)The Girl With The Dragon Tattoo(2009).mkv
if len(folder_leftover) > 0 and len(basename_leftover) > 1:
series = folder_leftover[0]
film_number = basename_leftover[0]
title = basename_leftover[1]
basename_leaves = list(basename.leaves())
num = None
try:
num = int(film_number.clean_value)
except ValueError:
pass
if num:
self.log.debug('series: %s' % series.clean_value)
self.log.debug('title: %s' % title.clean_value)
if (series.clean_value != title.clean_value and
series.clean_value != film_number.clean_value and
basename_leaves.index(film_number) == 0 and
basename_leaves.index(title) == 1 and
not GuessMovieTitleFromPosition.excluded_word(title, series)):
found_property(title, 'title', confidence=0.6)
found_property(series, 'filmSeries', confidence=0.6)
found_property(film_number, 'filmNumber', num, confidence=0.6)
return
if folder:
year_group = folder.first_leaf_containing('year')
if year_group:
groups_before = folder.previous_unidentified_leaves(year_group)
if groups_before:
try:
node = next(groups_before)
if not GuessMovieTitleFromPosition.excluded_word(node):
found_property(node, 'title', confidence=0.8)
return
except StopIteration:
pass
# if we have either format or videoCodec in the folder containing the
# file or one of its parents, then we should probably look for the title
# in there rather than in the basename
try:
props = list(mtree.previous_leaves_containing(mtree.children[-2],
['videoCodec',
'format',
'language']))
except IndexError:
props = []
if props:
group_idx = props[0].node_idx[0]
if all(g.node_idx[0] == group_idx for g in props):
# if they're all in the same group, take leftover info from there
leftover = mtree.node_at((group_idx,)).unidentified_leaves()
try:
node = next(leftover)
if not GuessMovieTitleFromPosition.excluded_word(node):
found_property(node, 'title', confidence=0.7)
return
except StopIteration:
pass
# look for title in basename if there are some remaining unidentified
# groups there
if basename_leftover:
# if basename is only one word and the containing folder has at least
# 3 words in it, we should take the title from the folder name
# ex: Movies/Alice in Wonderland DVDRip.XviD-DiAMOND/dmd-aw.avi
# ex: Movies/Somewhere.2010.DVDRip.XviD-iLG/i-smwhr.avi <-- TODO: gets caught here?
if (basename_leftover[0].clean_value.count(' ') == 0 and
folder_leftover and folder_leftover[0].clean_value.count(' ') >= 2 and
not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0])):
found_property(folder_leftover[0], 'title', confidence=0.7)
return
# if there are only many unidentified groups, take the first of which is
# not inside brackets or parentheses.
# ex: Movies/[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi
if basename_leftover[0].is_explicit():
for basename_leftover_elt in basename_leftover:
if not basename_leftover_elt.is_explicit() and not GuessMovieTitleFromPosition.excluded_word(basename_leftover_elt):
found_property(basename_leftover_elt, 'title', confidence=0.8)
return
# if all else fails, take the first remaining unidentified group in the
# basename as title
if not GuessMovieTitleFromPosition.excluded_word(basename_leftover[0]):
found_property(basename_leftover[0], 'title', confidence=0.6)
return
# if there are no leftover groups in the basename, look in the folder name
if folder_leftover and not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0]):
found_property(folder_leftover[0], 'title', confidence=0.5)
return
# if nothing worked, look if we have a very small group at the beginning
# of the basename
basename_leftover = basename.unidentified_leaves(valid=lambda leaf: True)
try:
node = next(basename_leftover)
if not GuessMovieTitleFromPosition.excluded_word(node):
found_property(node, 'title', confidence=0.4)
return
except StopIteration:
pass
| gpl-3.0 |
ECP-CANDLE/Benchmarks | Pilot1/UnoMT/networks/functions/drug_qed_func.py | 1 | 2363 | """
File Name: UnoPytorch/drug_qed_func.py
Author: Xiaotian Duan (xduan7)
Email: xduan7@uchicago.edu
Date: 9/4/18
Python Version: 3.6.6
File Description:
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import r2_score
def train_drug_qed(
device: torch.device,
drug_qed_net: nn.Module,
data_loader: torch.utils.data.DataLoader,
max_num_batches: int,
loss_func: callable,
optimizer: torch.optim,
):
drug_qed_net.train()
total_loss = 0.0
num_samples = 0
for batch_idx, (drug_feature, target) in enumerate(data_loader):
if batch_idx >= max_num_batches:
break
drug_feature, target = drug_feature.to(device), target.to(device)
drug_qed_net.zero_grad()
pred_target = drug_qed_net(drug_feature)
loss = loss_func(pred_target, target)
loss.backward()
optimizer.step()
num_samples += target.shape[0]
total_loss += loss.item() * target.shape[0]
print("\tDrug Weighted QED Regression Loss: %8.6f" % (total_loss / num_samples))
def valid_drug_qed(
device: torch.device,
drug_qed_net: nn.Module,
data_loader: torch.utils.data.DataLoader,
):
drug_qed_net.eval()
mse, mae = 0.0, 0.0
target_array, pred_array = np.array([]), np.array([])
with torch.no_grad():
for drug_feature, target in data_loader:
drug_feature, target = drug_feature.to(device), target.to(device)
pred_target = drug_qed_net(drug_feature)
num_samples = target.shape[0]
mse += F.mse_loss(pred_target, target).item() * num_samples
mae += F.l1_loss(pred_target, target).item() * num_samples
target_array = np.concatenate(
(target_array, target.cpu().numpy().flatten())
)
pred_array = np.concatenate(
(pred_array, pred_target.cpu().numpy().flatten())
)
mse /= len(data_loader.dataset)
mae /= len(data_loader.dataset)
r2 = r2_score(y_pred=pred_array, y_true=target_array)
print(
"\tDrug Weighted QED Regression\n"
"\t\tMSE: %8.6f \t MAE: %8.6f \t R2: %+4.2f" % (mse, mae, r2)
)
return mse, mae, r2
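if __name__ == "__main__":
    # Minimal usage sketch (not part of the original UnoMT pipeline): exercise
    # train_drug_qed/valid_drug_qed with a stand-in linear model on random data.
    from torch.utils.data import DataLoader, TensorDataset
    device = torch.device("cpu")
    features = torch.randn(64, 16)
    targets = torch.rand(64, 1)  # QED-like targets in [0, 1]
    loader = DataLoader(TensorDataset(features, targets), batch_size=8)
    net = nn.Linear(16, 1)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
    train_drug_qed(device, net, loader, max_num_batches=8,
                   loss_func=F.mse_loss, optimizer=optimizer)
    valid_drug_qed(device, net, loader)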
| mit |
anntzer/scikit-learn | sklearn/_loss/tests/test_loss.py | 12 | 42228 | import pickle
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
from pytest import approx
from scipy.optimize import (
minimize,
minimize_scalar,
newton,
LinearConstraint,
)
from scipy.special import logsumexp
from sklearn._loss.link import _inclusive_low_high, IdentityLink
from sklearn._loss.loss import (
_LOSSES,
BaseLoss,
AbsoluteError,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
HalfSquaredError,
HalfTweedieLoss,
HalfTweedieLossIdentity,
PinballLoss,
)
from sklearn.utils import assert_all_finite
from sklearn.utils._testing import create_memmap_backed_data, skip_if_32bit
ALL_LOSSES = list(_LOSSES.values())
LOSS_INSTANCES = [loss() for loss in ALL_LOSSES]
# HalfTweedieLoss(power=1.5) is already there as default
LOSS_INSTANCES += [
PinballLoss(quantile=0.25),
HalfTweedieLoss(power=-1.5),
HalfTweedieLoss(power=0),
HalfTweedieLoss(power=1),
HalfTweedieLoss(power=2),
HalfTweedieLoss(power=3.0),
HalfTweedieLossIdentity(power=0),
HalfTweedieLossIdentity(power=1),
HalfTweedieLossIdentity(power=2),
HalfTweedieLossIdentity(power=3.0),
]
def loss_instance_name(param):
if isinstance(param, BaseLoss):
loss = param
name = loss.__class__.__name__
if hasattr(loss, "quantile"):
name += f"(quantile={loss.closs.quantile})"
elif hasattr(loss, "power"):
name += f"(power={loss.closs.power})"
return name
else:
return str(param)
def random_y_true_raw_prediction(
loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42
):
"""Random generate y_true and raw_prediction in valid range."""
rng = np.random.RandomState(seed)
if loss.is_multiclass:
raw_prediction = np.empty((n_samples, loss.n_classes))
raw_prediction.flat[:] = rng.uniform(
low=raw_bound[0],
high=raw_bound[1],
size=n_samples * loss.n_classes,
)
y_true = np.arange(n_samples).astype(float) % loss.n_classes
else:
# If link is identity, we must respect the interval of y_pred:
if isinstance(loss.link, IdentityLink):
low, high = _inclusive_low_high(loss.interval_y_pred)
low = np.amax([low, raw_bound[0]])
high = np.amin([high, raw_bound[1]])
raw_bound = (low, high)
raw_prediction = rng.uniform(
low=raw_bound[0], high=raw_bound[1], size=n_samples
)
# generate a y_true in valid range
low, high = _inclusive_low_high(loss.interval_y_true)
low = max(low, y_bound[0])
high = min(high, y_bound[1])
y_true = rng.uniform(low, high, size=n_samples)
# set some values at special boundaries
if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive:
y_true[:: (n_samples // 3)] = 0
if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive:
y_true[1 :: (n_samples // 3)] = 1
return y_true, raw_prediction
def numerical_derivative(func, x, eps):
"""Helper function for numerical (first) derivatives."""
# For numerical derivatives, see
# https://en.wikipedia.org/wiki/Numerical_differentiation
# https://en.wikipedia.org/wiki/Finite_difference_coefficient
# We use central finite differences of accuracy 4.
h = np.full_like(x, fill_value=eps)
f_minus_2h = func(x - 2 * h)
f_minus_1h = func(x - h)
f_plus_1h = func(x + h)
f_plus_2h = func(x + 2 * h)
return (-f_plus_2h + 8 * f_plus_1h - 8 * f_minus_1h + f_minus_2h) / (12.0 * eps)
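# Illustrative check (not one of the test cases below): the five-point stencil above
# recovers d/dx sin(x) = cos(x) up to O(eps**4) error, e.g.
#   numerical_derivative(np.sin, np.array([0.0, 1.0]), eps=1e-3)
#   # ~ np.cos([0.0, 1.0]), with error on the order of 1e-12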
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_loss_boundary(loss):
"""Test interval ranges of y_true and y_pred in losses."""
# make sure low and high are always within the interval, used for linspace
if loss.is_multiclass:
y_true = np.linspace(0, 9, num=10)
else:
low, high = _inclusive_low_high(loss.interval_y_true)
y_true = np.linspace(low, high, num=10)
# add boundaries if they are included
if loss.interval_y_true.low_inclusive:
y_true = np.r_[y_true, loss.interval_y_true.low]
if loss.interval_y_true.high_inclusive:
y_true = np.r_[y_true, loss.interval_y_true.high]
assert loss.in_y_true_range(y_true)
n = y_true.shape[0]
low, high = _inclusive_low_high(loss.interval_y_pred)
if loss.is_multiclass:
y_pred = np.empty((n, 3))
y_pred[:, 0] = np.linspace(low, high, num=n)
y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0])
y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0])
else:
y_pred = np.linspace(low, high, num=n)
assert loss.in_y_pred_range(y_pred)
# calculating losses should not fail
raw_prediction = loss.link.link(y_pred)
loss.loss(y_true=y_true, raw_prediction=raw_prediction)
# Fixture to test valid value ranges.
Y_COMMON_PARAMS = [
# (loss, [y success], [y fail])
(HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
(AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
(PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
(HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
(HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
(HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]),
(HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]),
(HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
(HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
(HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
(HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]),
(HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]),
(HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
(HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
(HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
(HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]),
(HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]),
]
# y_pred and y_true do not always have the same domain (valid value range).
# Hence, we define extra sets of parameters for each of them.
Y_TRUE_PARAMS = [ # type: ignore
# (loss, [y success], [y fail])
(HalfPoissonLoss(), [0], []),
(HalfTweedieLoss(power=-3), [-100, -0.1, 0], []),
(HalfTweedieLoss(power=0), [-100, 0], []),
(HalfTweedieLoss(power=1.5), [0], []),
(HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []),
(HalfTweedieLossIdentity(power=0), [-100, 0], []),
(HalfTweedieLossIdentity(power=1.5), [0], []),
(HalfBinomialLoss(), [0, 1], []),
(HalfMultinomialLoss(), [0.0, 1.0, 2], []),
]
Y_PRED_PARAMS = [
# (loss, [y success], [y fail])
(HalfPoissonLoss(), [], [0]),
(HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]),
(HalfTweedieLoss(power=0), [], [-3, -0.1, 0]),
(HalfTweedieLoss(power=1.5), [], [0]),
(HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]),
(HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []),
(HalfTweedieLossIdentity(power=1.5), [], [0]),
(HalfBinomialLoss(), [], [0, 1]),
(HalfMultinomialLoss(), [0.1, 0.5], [0, 1]),
]
@pytest.mark.parametrize(
"loss, y_true_success, y_true_fail", Y_COMMON_PARAMS + Y_TRUE_PARAMS
)
def test_loss_boundary_y_true(loss, y_true_success, y_true_fail):
"""Test boundaries of y_true for loss functions."""
for y in y_true_success:
assert loss.in_y_true_range(np.array([y]))
for y in y_true_fail:
assert not loss.in_y_true_range(np.array([y]))
@pytest.mark.parametrize(
"loss, y_pred_success, y_pred_fail", Y_COMMON_PARAMS + Y_PRED_PARAMS # type: ignore
)
def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail):
"""Test boundaries of y_pred for loss functions."""
for y in y_pred_success:
assert loss.in_y_pred_range(np.array([y]))
for y in y_pred_fail:
assert not loss.in_y_pred_range(np.array([y]))
@pytest.mark.parametrize(
"loss, y_true, raw_prediction, loss_true",
[
(HalfSquaredError(), 1.0, 5.0, 8),
(AbsoluteError(), 1.0, 5.0, 4),
(PinballLoss(quantile=0.5), 1.0, 5.0, 2),
(PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25)),
(PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25),
(HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4)),
(HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4),
(HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2),
(HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2)),
(HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2),
(HalfTweedieLossIdentity(power=3), 2.0, 4.0, -1 / 4 + 1 / 4**2 + 1 / 2 / 2),
(HalfBinomialLoss(), 0.25, np.log(4), np.log(5) - 0.25 * np.log(4)),
(
HalfMultinomialLoss(n_classes=3),
0.0,
[0.2, 0.5, 0.3],
logsumexp([0.2, 0.5, 0.3]) - 0.2,
),
(
HalfMultinomialLoss(n_classes=3),
1.0,
[0.2, 0.5, 0.3],
logsumexp([0.2, 0.5, 0.3]) - 0.5,
),
(
HalfMultinomialLoss(n_classes=3),
2.0,
[0.2, 0.5, 0.3],
logsumexp([0.2, 0.5, 0.3]) - 0.3,
),
],
ids=loss_instance_name,
)
def test_loss_on_specific_values(loss, y_true, raw_prediction, loss_true):
"""Test losses at specific values."""
assert loss(
y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
) == approx(loss_true, rel=1e-11, abs=1e-12)
@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize("readonly_memmap", [False, True])
@pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_out", [np.float32, np.float64])
@pytest.mark.parametrize("sample_weight", [None, 1])
@pytest.mark.parametrize("out1", [None, 1])
@pytest.mark.parametrize("out2", [None, 1])
@pytest.mark.parametrize("n_threads", [1, 2])
def test_loss_dtype(
loss, readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads
):
"""Test acceptance of dtypes, readonly and writeable arrays in loss functions.
    Check that the loss accepts input arrays that are either all float32 or all
    float64, and output arrays that are either all float32 or all float64.
Also check that input arrays can be readonly, e.g. memory mapped.
"""
loss = loss()
# generate a y_true and raw_prediction in valid range
n_samples = 5
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=n_samples,
y_bound=(-100, 100),
raw_bound=(-10, 10),
seed=42,
)
y_true = y_true.astype(dtype_in)
raw_prediction = raw_prediction.astype(dtype_in)
if sample_weight is not None:
sample_weight = np.array([2.0] * n_samples, dtype=dtype_in)
if out1 is not None:
out1 = np.empty_like(y_true, dtype=dtype_out)
if out2 is not None:
out2 = np.empty_like(raw_prediction, dtype=dtype_out)
if readonly_memmap:
y_true = create_memmap_backed_data(y_true, aligned=True)
raw_prediction = create_memmap_backed_data(raw_prediction, aligned=True)
if sample_weight is not None:
sample_weight = create_memmap_backed_data(sample_weight, aligned=True)
loss.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out1,
n_threads=n_threads,
)
loss.gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out2,
n_threads=n_threads,
)
loss.loss_gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out1,
gradient_out=out2,
n_threads=n_threads,
)
if out1 is not None and loss.is_multiclass:
out1 = np.empty_like(raw_prediction, dtype=dtype_out)
loss.gradient_hessian(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out1,
hessian_out=out2,
n_threads=n_threads,
)
loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight)
loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)
if hasattr(loss, "predict_proba"):
loss.predict_proba(raw_prediction=raw_prediction)
if hasattr(loss, "gradient_proba"):
loss.gradient_proba(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out1,
proba_out=out2,
n_threads=n_threads,
)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_same_as_C_functions(loss, sample_weight):
"""Test that Python and Cython functions return same results."""
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=20,
y_bound=(-100, 100),
raw_bound=(-10, 10),
seed=42,
)
if sample_weight == "range":
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
out_l1 = np.empty_like(y_true)
out_l2 = np.empty_like(y_true)
out_g1 = np.empty_like(raw_prediction)
out_g2 = np.empty_like(raw_prediction)
out_h1 = np.empty_like(raw_prediction)
out_h2 = np.empty_like(raw_prediction)
assert_allclose(
loss.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out_l1,
),
loss.closs.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out_l2,
),
)
assert_allclose(
loss.gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out_g1,
),
loss.closs.gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out_g2,
),
)
loss.closs.loss_gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out_l1,
gradient_out=out_g1,
)
loss.closs.loss_gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out_l2,
gradient_out=out_g2,
)
assert_allclose(out_l1, out_l2)
assert_allclose(out_g1, out_g2)
loss.gradient_hessian(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out_g1,
hessian_out=out_h1,
)
loss.closs.gradient_hessian(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out_g2,
hessian_out=out_h2,
)
assert_allclose(out_g1, out_g2)
assert_allclose(out_h1, out_h2)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed):
"""Test that loss and gradient are the same across different functions.
Also test that output arguments contain correct results.
"""
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=20,
y_bound=(-100, 100),
raw_bound=(-10, 10),
seed=global_random_seed,
)
if sample_weight == "range":
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
out_l1 = np.empty_like(y_true)
out_l2 = np.empty_like(y_true)
out_g1 = np.empty_like(raw_prediction)
out_g2 = np.empty_like(raw_prediction)
out_g3 = np.empty_like(raw_prediction)
out_h3 = np.empty_like(raw_prediction)
l1 = loss.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out_l1,
)
g1 = loss.gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out_g1,
)
l2, g2 = loss.loss_gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=out_l2,
gradient_out=out_g2,
)
g3, h3 = loss.gradient_hessian(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out_g3,
hessian_out=out_h3,
)
assert_allclose(l1, l2)
assert_array_equal(l1, out_l1)
assert np.shares_memory(l1, out_l1)
assert_array_equal(l2, out_l2)
assert np.shares_memory(l2, out_l2)
assert_allclose(g1, g2)
assert_allclose(g1, g3)
assert_array_equal(g1, out_g1)
assert np.shares_memory(g1, out_g1)
assert_array_equal(g2, out_g2)
assert np.shares_memory(g2, out_g2)
assert_array_equal(g3, out_g3)
assert np.shares_memory(g3, out_g3)
if hasattr(loss, "gradient_proba"):
assert loss.is_multiclass # only for HalfMultinomialLoss
out_g4 = np.empty_like(raw_prediction)
out_proba = np.empty_like(raw_prediction)
g4, proba = loss.gradient_proba(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
gradient_out=out_g4,
proba_out=out_proba,
)
assert_allclose(g1, out_g4)
assert_allclose(g1, g4)
assert_allclose(proba, out_proba)
assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", ["ones", "random"])
def test_sample_weight_multiplies(loss, sample_weight, global_random_seed):
"""Test sample weights in loss, gradients and hessians.
Make sure that passing sample weights to loss, gradient and hessian
computation methods is equivalent to multiplying by the weights.
"""
n_samples = 100
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=n_samples,
y_bound=(-100, 100),
raw_bound=(-5, 5),
seed=global_random_seed,
)
if sample_weight == "ones":
sample_weight = np.ones(shape=n_samples, dtype=np.float64)
else:
rng = np.random.RandomState(global_random_seed)
sample_weight = rng.normal(size=n_samples).astype(np.float64)
assert_allclose(
loss.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
),
sample_weight
* loss.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=None,
),
)
losses, gradient = loss.loss_gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=None,
)
losses_sw, gradient_sw = loss.loss_gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
assert_allclose(losses * sample_weight, losses_sw)
if not loss.is_multiclass:
assert_allclose(gradient * sample_weight, gradient_sw)
else:
assert_allclose(gradient * sample_weight[:, None], gradient_sw)
gradient, hessian = loss.gradient_hessian(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=None,
)
gradient_sw, hessian_sw = loss.gradient_hessian(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
if not loss.is_multiclass:
assert_allclose(gradient * sample_weight, gradient_sw)
assert_allclose(hessian * sample_weight, hessian_sw)
else:
assert_allclose(gradient * sample_weight[:, None], gradient_sw)
assert_allclose(hessian * sample_weight[:, None], hessian_sw)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_graceful_squeezing(loss):
"""Test that reshaped raw_prediction gives same results."""
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=20,
y_bound=(-100, 100),
raw_bound=(-10, 10),
seed=42,
)
if raw_prediction.ndim == 1:
raw_prediction_2d = raw_prediction[:, None]
assert_allclose(
loss.loss(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.loss(y_true=y_true, raw_prediction=raw_prediction),
)
assert_allclose(
loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.loss_gradient(y_true=y_true, raw_prediction=raw_prediction),
)
assert_allclose(
loss.gradient(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.gradient(y_true=y_true, raw_prediction=raw_prediction),
)
assert_allclose(
loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction_2d),
loss.gradient_hessian(y_true=y_true, raw_prediction=raw_prediction),
)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_of_perfect_prediction(loss, sample_weight):
"""Test value of perfect predictions.
Loss of y_pred = y_true plus constant_to_optimal_zero should sums up to
zero.
"""
if not loss.is_multiclass:
# Use small values such that exp(value) is not nan.
raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])
# If link is identity, we must respect the interval of y_pred:
if isinstance(loss.link, IdentityLink):
eps = 1e-10
low = loss.interval_y_pred.low
if not loss.interval_y_pred.low_inclusive:
low = low + eps
high = loss.interval_y_pred.high
if not loss.interval_y_pred.high_inclusive:
high = high - eps
raw_prediction = np.clip(raw_prediction, low, high)
y_true = loss.link.inverse(raw_prediction)
else:
# HalfMultinomialLoss
y_true = np.arange(loss.n_classes).astype(float)
# raw_prediction with entries -exp(10), but +exp(10) on the diagonal
# this is close enough to np.inf which would produce nan
raw_prediction = np.full(
shape=(loss.n_classes, loss.n_classes),
fill_value=-np.exp(10),
dtype=float,
)
raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)
if sample_weight == "range":
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
loss_value = loss.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
constant_term = loss.constant_to_optimal_zero(
y_true=y_true, sample_weight=sample_weight
)
# Comparing loss_value + constant_term to zero would result in large
# round-off errors.
assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed):
"""Test gradients and hessians with numerical derivatives.
Gradient should equal the numerical derivatives of the loss function.
Hessians should equal the numerical derivatives of gradients.
"""
n_samples = 20
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=n_samples,
y_bound=(-100, 100),
raw_bound=(-5, 5),
seed=global_random_seed,
)
if sample_weight == "range":
sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
g, h = loss.gradient_hessian(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
assert g.shape == raw_prediction.shape
assert h.shape == raw_prediction.shape
if not loss.is_multiclass:
def loss_func(x):
return loss.loss(
y_true=y_true,
raw_prediction=x,
sample_weight=sample_weight,
)
g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6)
assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10)
def grad_func(x):
return loss.gradient(
y_true=y_true,
raw_prediction=x,
sample_weight=sample_weight,
)
h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6)
if loss.approx_hessian:
# TODO: What could we test if loss.approx_hessian?
pass
else:
assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10)
else:
# For multiclass loss, we should only change the predictions of the
# class for which the derivative is taken for, e.g. offset[:, k] = eps
# for class k.
# As a softmax is computed, offsetting the whole array by a constant
# would have no effect on the probabilities, and thus on the loss.
for k in range(loss.n_classes):
def loss_func(x):
raw = raw_prediction.copy()
raw[:, k] = x
return loss.loss(
y_true=y_true,
raw_prediction=raw,
sample_weight=sample_weight,
)
g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5)
assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10)
def grad_func(x):
raw = raw_prediction.copy()
raw[:, k] = x
return loss.gradient(
y_true=y_true,
raw_prediction=raw,
sample_weight=sample_weight,
)[:, k]
h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6)
if loss.approx_hessian:
# TODO: What could we test if loss.approx_hessian?
pass
else:
assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10)
@pytest.mark.parametrize(
"loss, x0, y_true",
[
("squared_error", -2.0, 42),
("squared_error", 117.0, 1.05),
("squared_error", 0.0, 0.0),
# The argmin of binomial_loss for y_true=0 and y_true=1 is resp.
# -inf and +inf due to logit, cf. "complete separation". Therefore, we
# use 0 < y_true < 1.
("binomial_loss", 0.3, 0.1),
("binomial_loss", -12, 0.2),
("binomial_loss", 30, 0.9),
("poisson_loss", 12.0, 1.0),
("poisson_loss", 0.0, 2.0),
("poisson_loss", -22.0, 10.0),
],
)
@skip_if_32bit
def test_derivatives(loss, x0, y_true):
"""Test that gradients are zero at the minimum of the loss.
We check this on a single value/sample using Halley's method with the
first and second order derivatives computed by the Loss instance.
Note that methods of Loss instances operate on arrays while the newton
root finder expects a scalar or a one-element array for this purpose.
"""
loss = _LOSSES[loss](sample_weight=None)
y_true = np.array([y_true], dtype=np.float64)
x0 = np.array([x0], dtype=np.float64)
def func(x: np.ndarray) -> np.ndarray:
"""Compute loss plus constant term.
The constant term is such that the minimum function value is zero,
which is required by the Newton method.
"""
return loss.loss(
y_true=y_true, raw_prediction=x
) + loss.constant_to_optimal_zero(y_true=y_true)
def fprime(x: np.ndarray) -> np.ndarray:
return loss.gradient(y_true=y_true, raw_prediction=x)
def fprime2(x: np.ndarray) -> np.ndarray:
return loss.gradient_hessian(y_true=y_true, raw_prediction=x)[1]
optimum = newton(
func,
x0=x0,
fprime=fprime,
fprime2=fprime2,
maxiter=100,
tol=5e-8,
)
# Need to ravel arrays because assert_allclose requires matching
# dimensions.
y_true = y_true.ravel()
optimum = optimum.ravel()
assert_allclose(loss.link.inverse(optimum), y_true)
assert_allclose(func(optimum), 0, atol=1e-14)
assert_allclose(loss.gradient(y_true=y_true, raw_prediction=optimum), 0, atol=5e-7)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_intercept_only(loss, sample_weight):
"""Test that fit_intercept_only returns the argmin of the loss.
Also test that the gradient is zero at the minimum.
"""
n_samples = 50
if not loss.is_multiclass:
y_true = loss.link.inverse(np.linspace(-4, 4, num=n_samples))
else:
y_true = np.arange(n_samples).astype(np.float64) % loss.n_classes
y_true[::5] = 0 # exceedance of class 0
if sample_weight == "range":
sample_weight = np.linspace(0.1, 2, num=n_samples)
a = loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
# find minimum by optimization
def fun(x):
if not loss.is_multiclass:
raw_prediction = np.full(shape=(n_samples), fill_value=x)
else:
raw_prediction = np.ascontiguousarray(
np.broadcast_to(x, shape=(n_samples, loss.n_classes))
)
return loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
if not loss.is_multiclass:
opt = minimize_scalar(fun, tol=1e-7, options={"maxiter": 100})
grad = loss.gradient(
y_true=y_true,
raw_prediction=np.full_like(y_true, a),
sample_weight=sample_weight,
)
assert a.shape == tuple() # scalar
assert a.dtype == y_true.dtype
assert_all_finite(a)
        assert a == approx(opt.x, rel=1e-7)
        assert grad.sum() == approx(0, abs=1e-12)
else:
# The constraint corresponds to sum(raw_prediction) = 0. Without it, we would
# need to apply loss.symmetrize_raw_prediction to opt.x before comparing.
opt = minimize(
fun,
np.zeros((loss.n_classes)),
tol=1e-13,
options={"maxiter": 100},
method="SLSQP",
constraints=LinearConstraint(np.ones((1, loss.n_classes)), 0, 0),
)
grad = loss.gradient(
y_true=y_true,
raw_prediction=np.tile(a, (n_samples, 1)),
sample_weight=sample_weight,
)
assert a.dtype == y_true.dtype
assert_all_finite(a)
assert_allclose(a, opt.x, rtol=5e-6, atol=1e-12)
assert_allclose(grad.sum(axis=0), 0, atol=1e-12)
@pytest.mark.parametrize(
"loss, func, random_dist",
[
(HalfSquaredError(), np.mean, "normal"),
(AbsoluteError(), np.median, "normal"),
(PinballLoss(quantile=0.25), lambda x: np.percentile(x, q=25), "normal"),
(HalfPoissonLoss(), np.mean, "poisson"),
(HalfGammaLoss(), np.mean, "exponential"),
(HalfTweedieLoss(), np.mean, "exponential"),
(HalfBinomialLoss(), np.mean, "binomial"),
],
)
def test_specific_fit_intercept_only(loss, func, random_dist, global_random_seed):
"""Test that fit_intercept_only returns the correct functional.
We test the functional for specific, meaningful distributions, e.g.
squared error estimates the expectation of a probability distribution.
"""
rng = np.random.RandomState(global_random_seed)
if random_dist == "binomial":
y_train = rng.binomial(1, 0.5, size=100)
else:
y_train = getattr(rng, random_dist)(size=100)
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
# Make sure baseline prediction is the expected functional=func, e.g. mean
# or median.
assert_all_finite(baseline_prediction)
assert baseline_prediction == approx(loss.link.link(func(y_train)))
assert loss.link.inverse(baseline_prediction) == approx(func(y_train))
    if isinstance(loss.link, IdentityLink):
assert_allclose(loss.link.inverse(baseline_prediction), baseline_prediction)
# Test baseline at boundary
if loss.interval_y_true.low_inclusive:
y_train.fill(loss.interval_y_true.low)
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
assert_all_finite(baseline_prediction)
if loss.interval_y_true.high_inclusive:
y_train.fill(loss.interval_y_true.high)
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
assert_all_finite(baseline_prediction)
def test_multinomial_loss_fit_intercept_only():
"""Test that fit_intercept_only returns the mean functional for CCE."""
rng = np.random.RandomState(0)
n_classes = 4
loss = HalfMultinomialLoss(n_classes=n_classes)
# Same logic as test_specific_fit_intercept_only. Here inverse link
# function = softmax and link function = log - symmetry term.
y_train = rng.randint(0, n_classes + 1, size=100).astype(np.float64)
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
assert baseline_prediction.shape == (n_classes,)
p = np.zeros(n_classes, dtype=y_train.dtype)
for k in range(n_classes):
p[k] = (y_train == k).mean()
assert_allclose(baseline_prediction, np.log(p) - np.mean(np.log(p)))
assert_allclose(baseline_prediction[None, :], loss.link.link(p[None, :]))
for y_train in (np.zeros(shape=10), np.ones(shape=10)):
y_train = y_train.astype(np.float64)
baseline_prediction = loss.fit_intercept_only(y_true=y_train)
assert baseline_prediction.dtype == y_train.dtype
assert_all_finite(baseline_prediction)
def test_binomial_and_multinomial_loss(global_random_seed):
"""Test that multinomial loss with n_classes = 2 is the same as binomial loss."""
rng = np.random.RandomState(global_random_seed)
n_samples = 20
binom = HalfBinomialLoss()
multinom = HalfMultinomialLoss(n_classes=2)
y_train = rng.randint(0, 2, size=n_samples).astype(np.float64)
raw_prediction = rng.normal(size=n_samples)
raw_multinom = np.empty((n_samples, 2))
raw_multinom[:, 0] = -0.5 * raw_prediction
raw_multinom[:, 1] = 0.5 * raw_prediction
assert_allclose(
binom.loss(y_true=y_train, raw_prediction=raw_prediction),
multinom.loss(y_true=y_train, raw_prediction=raw_multinom),
)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_predict_proba(loss, global_random_seed):
"""Test that predict_proba and gradient_proba work as expected."""
n_samples = 20
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=n_samples,
y_bound=(-100, 100),
raw_bound=(-5, 5),
seed=global_random_seed,
)
if hasattr(loss, "predict_proba"):
proba = loss.predict_proba(raw_prediction)
assert proba.shape == (n_samples, loss.n_classes)
assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
if hasattr(loss, "gradient_proba"):
for grad, proba in (
(None, None),
(None, np.empty_like(raw_prediction)),
(np.empty_like(raw_prediction), None),
(np.empty_like(raw_prediction), np.empty_like(raw_prediction)),
):
grad, proba = loss.gradient_proba(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=None,
gradient_out=grad,
proba_out=proba,
)
assert proba.shape == (n_samples, loss.n_classes)
assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
assert_allclose(
grad,
loss.gradient(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=None,
gradient_out=None,
),
)
@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
@pytest.mark.parametrize("order", ("C", "F"))
def test_init_gradient_and_hessians(loss, sample_weight, dtype, order):
"""Test that init_gradient_and_hessian works as expected.
    Passing sample_weight to a loss correctly influences the constant_hessian
attribute, and consequently the shape of the hessian array.
"""
n_samples = 5
if sample_weight == "range":
sample_weight = np.ones(n_samples)
loss = loss(sample_weight=sample_weight)
gradient, hessian = loss.init_gradient_and_hessian(
n_samples=n_samples,
dtype=dtype,
order=order,
)
if loss.constant_hessian:
assert gradient.shape == (n_samples,)
assert hessian.shape == (1,)
elif loss.is_multiclass:
assert gradient.shape == (n_samples, loss.n_classes)
assert hessian.shape == (n_samples, loss.n_classes)
else:
        assert gradient.shape == (n_samples,)
        assert hessian.shape == (n_samples,)
assert gradient.dtype == dtype
assert hessian.dtype == dtype
if order == "C":
assert gradient.flags.c_contiguous
assert hessian.flags.c_contiguous
else:
assert gradient.flags.f_contiguous
assert hessian.flags.f_contiguous
@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize(
"params, err_msg",
[
(
{"dtype": np.int64},
f"Valid options for 'dtype' are .* Got dtype={np.int64} instead.",
),
],
)
def test_init_gradient_and_hessian_raises(loss, params, err_msg):
"""Test that init_gradient_and_hessian raises errors for invalid input."""
loss = loss()
with pytest.raises((ValueError, TypeError), match=err_msg):
gradient, hessian = loss.init_gradient_and_hessian(n_samples=5, **params)
@pytest.mark.parametrize(
"loss, params, err_type, err_msg",
[
(
PinballLoss,
{"quantile": None},
TypeError,
"quantile must be an instance of float, not NoneType.",
),
(
PinballLoss,
{"quantile": 0},
ValueError,
"quantile == 0, must be > 0.",
),
(PinballLoss, {"quantile": 1.1}, ValueError, "quantile == 1.1, must be < 1."),
],
)
def test_loss_init_parameter_validation(loss, params, err_type, err_msg):
"""Test that loss raises errors for invalid input."""
with pytest.raises(err_type, match=err_msg):
loss(**params)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_loss_pickle(loss):
"""Test that losses can be pickled."""
n_samples = 20
y_true, raw_prediction = random_y_true_raw_prediction(
loss=loss,
n_samples=n_samples,
y_bound=(-100, 100),
raw_bound=(-5, 5),
seed=42,
)
pickled_loss = pickle.dumps(loss)
unpickled_loss = pickle.loads(pickled_loss)
assert loss(y_true=y_true, raw_prediction=raw_prediction) == approx(
unpickled_loss(y_true=y_true, raw_prediction=raw_prediction)
)
@pytest.mark.parametrize("p", [-1.5, 0, 1, 1.5, 2, 3])
def test_tweedie_log_identity_consistency(p):
"""Test for identical losses when only the link function is different."""
half_tweedie_log = HalfTweedieLoss(power=p)
half_tweedie_identity = HalfTweedieLossIdentity(power=p)
n_samples = 10
y_true, raw_prediction = random_y_true_raw_prediction(
loss=half_tweedie_log, n_samples=n_samples, seed=42
)
y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)
# Let's compare the loss values, up to some constant term that is dropped
# in HalfTweedieLoss but not in HalfTweedieLossIdentity.
loss_log = half_tweedie_log.loss(
y_true=y_true, raw_prediction=raw_prediction
) + half_tweedie_log.constant_to_optimal_zero(y_true)
loss_identity = half_tweedie_identity.loss(
y_true=y_true, raw_prediction=y_pred
) + half_tweedie_identity.constant_to_optimal_zero(y_true)
# Note that HalfTweedieLoss ignores different constant terms than
# HalfTweedieLossIdentity. Constant terms means terms not depending on
# raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
# give the same values.
assert_allclose(loss_log, loss_identity)
# For gradients and hessians, the constant terms do not matter. We have, however,
# to account for the chain rule, i.e. with x=raw_prediction
# gradient_log(x) = d/dx loss_log(x)
# = d/dx loss_identity(exp(x))
# = exp(x) * gradient_identity(exp(x))
# Similarly,
# hessian_log(x) = exp(x) * gradient_identity(exp(x))
    #                  + exp(x)**2 * hessian_identity(exp(x))
gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
y_true=y_true, raw_prediction=raw_prediction
)
gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
y_true=y_true, raw_prediction=y_pred
)
assert_allclose(gradient_log, y_pred * gradient_identity)
assert_allclose(
hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
)
| bsd-3-clause |
tdent/pycbc | pycbc/events/triggers.py | 6 | 8750 | # Copyright (C) 2017 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" This modules contains functions for reading single and coincident triggers
from the command line.
"""
import h5py
import numpy
from pycbc import conversions, pnutils
from pycbc.events import coinc
import pycbc.detector
def insert_bank_bins_option_group(parser):
""" Add options to the optparser object for selecting templates in bins.
Parameters
-----------
    parser : object
        argparse.ArgumentParser instance.
"""
bins_group = parser.add_argument_group(
"Options for selecting templates in bins.")
bins_group.add_argument("--bank-bins", nargs="+", default=None,
help="Ordered list of mass bin upper boundaries. "
"An ordered list of type-boundary pairs, "
"applied sequentially. Must provide a name "
"(can be any unique string for tagging "
"purposes), the parameter to bin "
"on, and the membership condition via "
"'lt' / 'gt' operators. "
"Ex. name1:component:lt2 name2:total:lt15")
bins_group.add_argument("--bank-file", default=None,
help="HDF format template bank file.")
bins_group.add_argument("--f-lower", default=None,
help="Low frequency cutoff in Hz.")
return bins_group
def bank_bins_from_cli(opts):
""" Parses the CLI options related to binning templates in the bank.
Parameters
----------
    opts : object
        Result of parsing the CLI with argparse.ArgumentParser.
    Returns
    -------
bins_idx : dict
A dict with bin names as key and an array of their indices as value.
bank : dict
A dict of the datasets from the bank file.
"""
bank = {}
    fp = h5py.File(opts.bank_file, "r")
for key in fp.keys():
bank[key] = fp[key][:]
bank["f_lower"] = float(opts.f_lower) if opts.f_lower else None
if opts.bank_bins:
bins_idx = coinc.background_bin_from_string(opts.bank_bins, bank)
else:
bins_idx = {"all" : numpy.arange(0, len(bank[tuple(fp.keys())[0]]))}
fp.close()
return bins_idx, bank
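# Illustrative wiring of the two helpers above (a sketch, not part of the
# original module; the file name and bin boundaries below are made up):
#     parser = argparse.ArgumentParser()
#     insert_bank_bins_option_group(parser)
#     opts = parser.parse_args(["--bank-file", "bank.hdf", "--f-lower", "20",
#                               "--bank-bins", "light:total:lt15", "heavy:total:lt100"])
#     bins_idx, bank = bank_bins_from_cli(opts)
#     # bins_idx maps each bin name to the indices of the templates in that bin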
def get_mass_spin(bank, tid):
"""
Helper function
Parameters
----------
bank : h5py File object
Bank parameter file
tid : integer or array of int
Indices of the entries to be returned
Returns
-------
m1, m2, s1z, s2z : tuple of floats or arrays of floats
Parameter values of the bank entries
"""
m1 = bank['mass1'][:][tid]
m2 = bank['mass2'][:][tid]
s1z = bank['spin1z'][:][tid]
s2z = bank['spin2z'][:][tid]
return m1, m2, s1z, s2z
def get_param(par, args, m1, m2, s1z, s2z):
"""
Helper function
Parameters
----------
par : string
Name of parameter to calculate
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
m1 : float or array of floats
First binary component mass (etc.)
Returns
-------
parvals : float or array of floats
Calculated parameter values
"""
if par == 'mchirp':
parvals = conversions.mchirp_from_mass1_mass2(m1, m2)
elif par == 'mtotal':
parvals = m1 + m2
elif par == 'eta':
parvals = conversions.eta_from_mass1_mass2(m1, m2)
elif par in ['chi_eff', 'effective_spin']:
parvals = conversions.chi_eff(m1, m2, s1z, s2z)
elif par == 'template_duration':
# default to SEOBNRv4 duration function
if not hasattr(args, 'approximant') or args.approximant is None:
args.approximant = "SEOBNRv4"
parvals = pnutils.get_imr_duration(m1, m2, s1z, s2z, args.f_lower,
args.approximant)
if args.min_duration:
parvals += args.min_duration
elif par == 'tau0':
parvals = conversions.tau0_from_mass1_mass2(m1, m2, args.f_lower)
elif par == 'tau3':
parvals = conversions.tau3_from_mass1_mass2(m1, m2, args.f_lower)
elif par in pnutils.named_frequency_cutoffs.keys():
parvals = pnutils.frequency_cutoff_from_name(par, m1, m2, s1z, s2z)
else:
# try asking for a LALSimulation frequency function
parvals = pnutils.get_freq(par, m1, m2, s1z, s2z)
return parvals
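# Worked example (illustrative): for a non-spinning 1.4+1.4 solar-mass binary,
#     get_param('mchirp', None, 1.4, 1.4, 0., 0.)
# evaluates (m1*m2)**(3/5) / (m1+m2)**(1/5) and returns roughly 1.219.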
def get_found_param(injfile, bankfile, trigfile, param, ifo, args=None):
"""
Translates some popular trigger parameters into functions that calculate
them from an hdf found injection file
Parameters
----------
injfile: hdf5 File object
Injection file of format known to ANitz (DOCUMENTME)
bankfile: hdf5 File object or None
Template bank file
trigfile: hdf5 File object or None
Single-detector trigger file
param: string
Parameter to be calculated for the recovered triggers
ifo: string or None
Standard ifo name, ex. 'L1'
args : Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
Returns
-------
[return value]: NumPy array of floats, array of boolean
The calculated parameter values and a Boolean mask indicating which
injections were found in the given ifo (if supplied)
"""
foundtmp = injfile["found_after_vetoes/template_id"][:]
# will record whether inj was found in the given ifo
found_in_ifo = numpy.ones_like(foundtmp, dtype=bool)
if trigfile is not None:
try: # old 2-ifo behaviour
# get the name of the ifo in the injection file, eg "detector_1"
# and the integer from that name
ifolabel = [name for name, val in injfile.attrs.items() if \
"detector" in name and val == ifo][0]
foundtrg = injfile["found_after_vetoes/trigger_id" + ifolabel[-1]]
except IndexError: # multi-ifo
foundtrg = injfile["found_after_vetoes/%s/trigger_id" % ifo]
# multi-ifo pipeline assigns -1 for inj not found in specific ifo
found_in_ifo = foundtrg[:] != -1
if bankfile is not None and param in bankfile.keys():
return bankfile[param][:][foundtmp], found_in_ifo
elif trigfile is not None and param in trigfile[ifo].keys():
return trigfile[ifo][param][:][foundtrg], found_in_ifo
else:
assert bankfile
b = bankfile
return get_param(param, args, b['mass1'][:], b['mass2'][:],
b['spin1z'][:], b['spin2z'][:])[foundtmp],\
found_in_ifo
def get_inj_param(injfile, param, ifo, args=None):
"""
Translates some popular injection parameters into functions that calculate
them from an hdf found injection file
Parameters
----------
injfile: hdf5 File object
Injection file of format known to ANitz (DOCUMENTME)
param: string
Parameter to be calculated for the injected signals
ifo: string
Standard detector name, ex. 'L1'
args: Namespace object returned from ArgumentParser instance
Calling code command line options, used for f_lower value
Returns
-------
[return value]: NumPy array of floats
The calculated parameter values
"""
det = pycbc.detector.Detector(ifo)
inj = injfile["injections"]
if param in inj.keys():
return inj["injections/"+param]
if param == "end_time_"+ifo[0].lower():
return inj['end_time'][:] + det.time_delay_from_earth_center(
inj['longitude'][:],
inj['latitude'][:],
inj['end_time'][:])
else:
return get_param(param, args, inj['mass1'][:], inj['mass2'][:],
inj['spin1z'][:], inj['spin2z'][:])
| gpl-3.0 |
pytorch/fairseq | examples/speech_synthesis/preprocessing/denoiser/resample.py | 1 | 2226 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import torch as th
from torch.nn import functional as F
def sinc(t):
"""sinc.
:param t: the input tensor
"""
return th.where(t == 0, th.tensor(1., device=t.device, dtype=t.dtype),
th.sin(t) / t)
def kernel_upsample2(zeros=56):
"""kernel_upsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t *= math.pi
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def upsample2(x, zeros=56):
"""
Upsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
*other, time = x.shape
kernel = kernel_upsample2(zeros).to(x)
out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(
*other, time
)
y = th.stack([x, out], dim=-1)
return y.view(*other, -1)
def kernel_downsample2(zeros=56):
"""kernel_downsample2.
"""
win = th.hann_window(4 * zeros + 1, periodic=False)
winodd = win[1::2]
t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
t.mul_(math.pi)
kernel = (sinc(t) * winodd).view(1, 1, -1)
return kernel
def downsample2(x, zeros=56):
"""
Downsampling the input by 2 using sinc interpolation.
Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method."
ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing.
Vol. 9. IEEE, 1984.
"""
if x.shape[-1] % 2 != 0:
x = F.pad(x, (0, 1))
xeven = x[..., ::2]
xodd = x[..., 1::2]
*other, time = xodd.shape
kernel = kernel_downsample2(zeros).to(x)
out = xeven + F.conv1d(
xodd.view(-1, 1, time), kernel, padding=zeros
)[..., :-1].view(*other, time)
return out.view(*other, -1).mul(0.5)
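# Minimal round-trip sketch (not part of the original module): upsampling by 2
# followed by downsampling by 2 should approximately recover a smooth input.
if __name__ == "__main__":
    x = th.sin(th.linspace(0, 8 * math.pi, 1024)).view(1, 1, -1)
    x_rt = downsample2(upsample2(x))
    # The printed error is only illustrative, not a documented guarantee.
    print("max round-trip error:", (x - x_rt).abs().max().item())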
| mit |
tensorflow/federated | tensorflow_federated/python/simulation/datasets/__init__.py | 1 | 2163 | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets for running TensorFlow Federated simulations."""
from tensorflow_federated.python.simulation.datasets import celeba
from tensorflow_federated.python.simulation.datasets import cifar100
from tensorflow_federated.python.simulation.datasets import emnist
from tensorflow_federated.python.simulation.datasets import gldv2
from tensorflow_federated.python.simulation.datasets import inaturalist
from tensorflow_federated.python.simulation.datasets import shakespeare
from tensorflow_federated.python.simulation.datasets import stackoverflow
from tensorflow_federated.python.simulation.datasets.client_data import ClientData
from tensorflow_federated.python.simulation.datasets.dataset_utils import build_dataset_mixture
from tensorflow_federated.python.simulation.datasets.dataset_utils import build_single_label_dataset
from tensorflow_federated.python.simulation.datasets.dataset_utils import build_synthethic_iid_datasets
from tensorflow_federated.python.simulation.datasets.file_per_user_client_data import FilePerUserClientData
from tensorflow_federated.python.simulation.datasets.from_tensor_slices_client_data import TestClientData
from tensorflow_federated.python.simulation.datasets.sql_client_data import SqlClientData
from tensorflow_federated.python.simulation.datasets.sql_client_data_utils import load_and_parse_sql_client_data
from tensorflow_federated.python.simulation.datasets.sql_client_data_utils import save_to_sql_client_data
from tensorflow_federated.python.simulation.datasets.transforming_client_data import TransformingClientData
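# Illustrative usage of this package (a sketch; downloading the federated EMNIST
# data requires network access and local cache space):
#     import tensorflow_federated as tff
#     train_data, test_data = tff.simulation.datasets.emnist.load_data()
#     first_client = train_data.client_ids[0]
#     dataset = train_data.create_tf_dataset_for_client(first_client)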
| apache-2.0 |
wkentaro/fcn | fcn/datasets/voc.py | 1 | 5016 | import collections
import os.path as osp
import chainer
import numpy as np
import PIL.Image
import scipy.io
from .. import data
DATASETS_DIR = osp.expanduser('~/data/datasets/VOC')
class VOCClassSegBase(chainer.dataset.DatasetMixin):
class_names = np.array([
'background',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'potted plant',
'sheep',
'sofa',
'train',
'tv/monitor',
])
def __init__(self, year, split='train'):
self.split = split
# VOC20XX is subset of VOC2012
dataset_dir = osp.join(DATASETS_DIR, 'VOCdevkit/VOC2012')
if not osp.exists(dataset_dir):
self.download()
self.files = collections.defaultdict(list)
for split in ['train', 'val']:
imgsets_file = osp.join(
dataset_dir, 'ImageSets/Segmentation/%s.txt' % split)
for did in open(imgsets_file):
did = did.strip()
img_file = osp.join(dataset_dir, 'JPEGImages/%s.jpg' % did)
lbl_file = osp.join(
dataset_dir, 'SegmentationClass/%s.png' % did)
self.files[split].append({
'img': img_file,
'lbl': lbl_file,
})
def __len__(self):
return len(self.files[self.split])
def get_example(self, index):
data_file = self.files[self.split][index]
# load image
img_file = data_file['img']
img = PIL.Image.open(img_file)
img = np.array(img, dtype=np.uint8)
# load label
lbl_file = data_file['lbl']
lbl = PIL.Image.open(lbl_file)
lbl = np.array(lbl, dtype=np.int32)
lbl[lbl == 255] = -1
return img, lbl
@staticmethod
def download():
raise NotImplementedError
class VOC2011ClassSeg(VOCClassSegBase):
def __init__(self, split='train'):
super(VOC2011ClassSeg, self).__init__(year=2011, split=split)
pkg_root = osp.join(osp.dirname(osp.realpath(__file__)), '..')
imgsets_file = osp.join(
pkg_root, 'external/fcn.berkeleyvision.org',
'data/pascal/seg11valid.txt')
# VOC2011 is subset of VOC2012
dataset_dir = osp.join(DATASETS_DIR, 'VOCdevkit/VOC2012')
for did in open(imgsets_file):
did = did.strip()
img_file = osp.join(dataset_dir, 'JPEGImages/%s.jpg' % did)
lbl_file = osp.join(dataset_dir, 'SegmentationClass/%s.png' % did)
self.files['seg11valid'].append({'img': img_file, 'lbl': lbl_file})
@staticmethod
def download():
VOC2012ClassSeg.download()
class VOC2012ClassSeg(VOCClassSegBase):
def __init__(self, split='train'):
super(VOC2012ClassSeg, self).__init__(year=2012, split=split)
@staticmethod
def download():
url = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar' # NOQA
path = osp.join(DATASETS_DIR, osp.basename(url))
md5 = '6cd6e144f989b92b3379bac3b3de84fd'
data.cached_download(url, path, md5)
data.extract_file(path, to_directory=DATASETS_DIR)
class SBDClassSeg(VOCClassSegBase):
def __init__(self, split='train'):
self.split = split
dataset_dir = osp.join(DATASETS_DIR, 'benchmark_RELEASE/dataset')
if not osp.exists(dataset_dir):
self.download()
self.files = collections.defaultdict(list)
for split in ['train', 'val']:
imgsets_file = osp.join(dataset_dir, '%s.txt' % split)
for did in open(imgsets_file):
did = did.strip()
img_file = osp.join(dataset_dir, 'img/%s.jpg' % did)
lbl_file = osp.join(dataset_dir, 'cls/%s.mat' % did)
self.files[split].append({
'img': img_file,
'lbl': lbl_file,
})
def get_example(self, index):
data_file = self.files[self.split][index]
# load image
img_file = data_file['img']
img = PIL.Image.open(img_file)
img = np.array(img, dtype=np.uint8)
# load label
lbl_file = data_file['lbl']
mat = scipy.io.loadmat(lbl_file)
lbl = mat['GTcls'][0]['Segmentation'][0].astype(np.int32)
lbl[lbl == 255] = -1
return img, lbl
@staticmethod
def download():
# It must be renamed to benchmark.tar to be extracted
url = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz' # NOQA
path = osp.join(DATASETS_DIR, 'benchmark.tar')
md5 = '2b2af8a6cff7365684e002c08be823a6'
data.cached_download(url, path, md5)
data.extract_file(path, to_directory=DATASETS_DIR)
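# Example usage (a sketch; assumes the data can be downloaded or already lives
# under ~/data/datasets/VOC as handled by the download() helpers above):
#     dataset = VOC2012ClassSeg(split='train')
#     img, lbl = dataset.get_example(0)
#     # img is an HxWx3 uint8 array, lbl an HxW int32 label map with -1 for void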
| mit |
anntzer/scikit-learn | sklearn/ensemble/tests/test_common.py | 9 | 9161 | import numpy as np
import pytest
from sklearn.base import clone
from sklearn.base import ClassifierMixin
from sklearn.base import is_classifier
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.datasets import load_iris, load_diabetes
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.svm import LinearSVC, LinearSVR, SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.ensemble import StackingClassifier, StackingRegressor
from sklearn.ensemble import VotingClassifier, VotingRegressor
X, y = load_iris(return_X_y=True)
X_r, y_r = load_diabetes(return_X_y=True)
@pytest.mark.parametrize(
"X, y, estimator",
[
(
*make_classification(n_samples=10),
StackingClassifier(
estimators=[
("lr", LogisticRegression()),
("svm", LinearSVC()),
("rf", RandomForestClassifier(n_estimators=5, max_depth=3)),
],
cv=2,
),
),
(
*make_classification(n_samples=10),
VotingClassifier(
estimators=[
("lr", LogisticRegression()),
("svm", LinearSVC()),
("rf", RandomForestClassifier(n_estimators=5, max_depth=3)),
]
),
),
(
*make_regression(n_samples=10),
StackingRegressor(
estimators=[
("lr", LinearRegression()),
("svm", LinearSVR()),
("rf", RandomForestRegressor(n_estimators=5, max_depth=3)),
],
cv=2,
),
),
(
*make_regression(n_samples=10),
VotingRegressor(
estimators=[
("lr", LinearRegression()),
("svm", LinearSVR()),
("rf", RandomForestRegressor(n_estimators=5, max_depth=3)),
]
),
),
],
ids=[
"stacking-classifier",
"voting-classifier",
"stacking-regressor",
"voting-regressor",
],
)
def test_ensemble_heterogeneous_estimators_behavior(X, y, estimator):
# check that the behavior of `estimators`, `estimators_`,
# `named_estimators`, `named_estimators_` is consistent across all
# ensemble classes and when using `set_params()`.
# before fit
assert "svm" in estimator.named_estimators
assert estimator.named_estimators.svm is estimator.estimators[1][1]
assert estimator.named_estimators.svm is estimator.named_estimators["svm"]
# check fitted attributes
estimator.fit(X, y)
assert len(estimator.named_estimators) == 3
assert len(estimator.named_estimators_) == 3
assert sorted(list(estimator.named_estimators_.keys())) == sorted(
["lr", "svm", "rf"]
)
# check that set_params() does not add a new attribute
estimator_new_params = clone(estimator)
svm_estimator = SVC() if is_classifier(estimator) else SVR()
estimator_new_params.set_params(svm=svm_estimator).fit(X, y)
assert not hasattr(estimator_new_params, "svm")
assert (
estimator_new_params.named_estimators.lr.get_params()
== estimator.named_estimators.lr.get_params()
)
assert (
estimator_new_params.named_estimators.rf.get_params()
== estimator.named_estimators.rf.get_params()
)
    # check the behavior when setting and dropping an estimator
estimator_dropped = clone(estimator)
estimator_dropped.set_params(svm="drop")
estimator_dropped.fit(X, y)
assert len(estimator_dropped.named_estimators) == 3
assert estimator_dropped.named_estimators.svm == "drop"
assert len(estimator_dropped.named_estimators_) == 3
assert sorted(list(estimator_dropped.named_estimators_.keys())) == sorted(
["lr", "svm", "rf"]
)
for sub_est in estimator_dropped.named_estimators_:
# check that the correspondence is correct
assert not isinstance(sub_est, type(estimator.named_estimators.svm))
# check that we can set the parameters of the underlying classifier
estimator.set_params(svm__C=10.0)
estimator.set_params(rf__max_depth=5)
assert (
estimator.get_params()["svm__C"]
== estimator.get_params()["svm"].get_params()["C"]
)
assert (
estimator.get_params()["rf__max_depth"]
== estimator.get_params()["rf"].get_params()["max_depth"]
)
@pytest.mark.parametrize(
"Ensemble",
[StackingClassifier, VotingClassifier, StackingRegressor, VotingRegressor],
)
def test_ensemble_heterogeneous_estimators_type(Ensemble):
# check that ensemble will fail during validation if the underlying
# estimators are not of the same type (i.e. classifier or regressor)
if issubclass(Ensemble, ClassifierMixin):
X, y = make_classification(n_samples=10)
estimators = [("lr", LinearRegression())]
ensemble_type = "classifier"
else:
X, y = make_regression(n_samples=10)
estimators = [("lr", LogisticRegression())]
ensemble_type = "regressor"
ensemble = Ensemble(estimators=estimators)
err_msg = "should be a {}".format(ensemble_type)
with pytest.raises(ValueError, match=err_msg):
ensemble.fit(X, y)
@pytest.mark.parametrize(
"X, y, Ensemble",
[
(*make_classification(n_samples=10), StackingClassifier),
(*make_classification(n_samples=10), VotingClassifier),
(*make_regression(n_samples=10), StackingRegressor),
(*make_regression(n_samples=10), VotingRegressor),
],
)
def test_ensemble_heterogeneous_estimators_name_validation(X, y, Ensemble):
# raise an error when the name contains dunder
if issubclass(Ensemble, ClassifierMixin):
estimators = [("lr__", LogisticRegression())]
else:
estimators = [("lr__", LinearRegression())]
ensemble = Ensemble(estimators=estimators)
err_msg = r"Estimator names must not contain __: got \['lr__'\]"
with pytest.raises(ValueError, match=err_msg):
ensemble.fit(X, y)
# raise an error when the name is not unique
if issubclass(Ensemble, ClassifierMixin):
estimators = [("lr", LogisticRegression()), ("lr", LogisticRegression())]
else:
estimators = [("lr", LinearRegression()), ("lr", LinearRegression())]
ensemble = Ensemble(estimators=estimators)
err_msg = r"Names provided are not unique: \['lr', 'lr'\]"
with pytest.raises(ValueError, match=err_msg):
ensemble.fit(X, y)
# raise an error when the name conflicts with the parameters
if issubclass(Ensemble, ClassifierMixin):
estimators = [("estimators", LogisticRegression())]
else:
estimators = [("estimators", LinearRegression())]
ensemble = Ensemble(estimators=estimators)
err_msg = "Estimator names conflict with constructor arguments"
with pytest.raises(ValueError, match=err_msg):
ensemble.fit(X, y)
@pytest.mark.parametrize(
"X, y, estimator",
[
(
*make_classification(n_samples=10),
StackingClassifier(estimators=[("lr", LogisticRegression())]),
),
(
*make_classification(n_samples=10),
VotingClassifier(estimators=[("lr", LogisticRegression())]),
),
(
*make_regression(n_samples=10),
StackingRegressor(estimators=[("lr", LinearRegression())]),
),
(
*make_regression(n_samples=10),
VotingRegressor(estimators=[("lr", LinearRegression())]),
),
],
ids=[
"stacking-classifier",
"voting-classifier",
"stacking-regressor",
"voting-regressor",
],
)
def test_ensemble_heterogeneous_estimators_all_dropped(X, y, estimator):
# check that we raise a consistent error when all estimators are
# dropped
estimator.set_params(lr="drop")
with pytest.raises(ValueError, match="All estimators are dropped."):
estimator.fit(X, y)
@pytest.mark.parametrize(
"Ensemble, Estimator, X, y",
[
(StackingClassifier, LogisticRegression, X, y),
(StackingRegressor, LinearRegression, X_r, y_r),
(VotingClassifier, LogisticRegression, X, y),
(VotingRegressor, LinearRegression, X_r, y_r),
],
)
# FIXME: we should move this test in `estimator_checks` once we are able
# to construct meta-estimator instances
def test_heterogeneous_ensemble_support_missing_values(Ensemble, Estimator, X, y):
# check that Voting and Stacking predictor delegate the missing values
# validation to the underlying estimator.
X = X.copy()
mask = np.random.choice([1, 0], X.shape, p=[0.1, 0.9]).astype(bool)
X[mask] = np.nan
pipe = make_pipeline(SimpleImputer(), Estimator())
ensemble = Ensemble(estimators=[("pipe1", pipe), ("pipe2", pipe)])
ensemble.fit(X, y).score(X, y)
| bsd-3-clause |
pytorch/fairseq | examples/rxf/rxf_src/sentence_prediction_r3f.py | 1 | 6587 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_prediction_r3f")
class SentencePredictionR3F(FairseqCriterion):
def __init__(
self,
task,
eps,
r3f_lambda,
noise_type,
classification_head_name,
regression_target,
):
super().__init__(task)
self.eps = eps
self.r3f_lambda = r3f_lambda
self.noise_type = noise_type
self.classification_head_name = classification_head_name
self.regression_target = regression_target
if self.noise_type in {"normal"}:
self.noise_sampler = torch.distributions.normal.Normal(
loc=0.0, scale=self.eps
)
elif self.noise_type == "uniform":
self.noise_sampler = torch.distributions.uniform.Uniform(
low=-self.eps, high=self.eps
)
else:
raise Exception(f"unrecognized noise type {self.noise_type}")
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--eps', type=float, default=1e-5,
help='noise eps')
parser.add_argument('--r3f-lambda', type=float, default=1.0,
help='lambda for combining logistic loss and noisy KL loss')
parser.add_argument('--noise-type', type=str, default='uniform',
choices=['normal', 'uniform'],
help='type of noises for RXF methods')
parser.add_argument('--classification-head-name',
default='sentence_classification_head',
help='name of the classification head to use')
parser.add_argument('--regression-target', action='store_true')
# fmt: on
def _get_symm_kl(self, noised_logits, input_logits):
return (
F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
+ F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
)
) / noised_logits.size(0)
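    # The quantity above is the symmetric KL divergence between the clean and
    # noise-perturbed output distributions, averaged over the first dimension:
    #     KL(p_clean || q_noised) + KL(q_noised || p_clean)
    # In forward() it is rescaled by the sample size and --r3f-lambda before
    # being added to the task loss.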
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
token_embeddings = model.encoder.sentence_encoder.embed_tokens(
sample["net_input"]["src_tokens"]
)
input_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=token_embeddings,
)
if model.training and self.noise_sampler:
noise = self.noise_sampler.sample(sample_shape=token_embeddings.shape).to(
token_embeddings
)
noised_embeddings = token_embeddings.detach().clone() + noise
noised_logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
token_embeddings=noised_embeddings,
)
symm_kl = self._get_symm_kl(noised_logits, input_logits)
else:
symm_kl = 0
targets = model.get_targets(sample, [input_logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
loss = F.nll_loss(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
targets,
reduction="sum",
)
if model.training:
symm_kl = symm_kl * sample_size
loss = loss + self.r3f_lambda * symm_kl
else:
logits = input_logits.squeeze().float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = input_logits.max(dim=1)[1]
logging_output.update(ncorrect=(preds == targets).sum().item())
if model.training and self.noise_sampler:
logging_output.update(
symm_kl=utils.item(symm_kl.data) if reduce else symm_kl.data
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
symm_kl_sum = sum(log.get("symm_kl", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"symm_kl": symm_kl_sum / sample_size,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
agg_output.update(accuracy=ncorrect / nsentences)
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
return agg_output
| mit |
florian-f/sklearn | sklearn/utils/class_weight.py | 3 | 2062 | # Authors: Andreas Mueller
# License: Simplified BSD
import numpy as np
from .fixes import bincount
def compute_class_weight(class_weight, classes, y_ind):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, 'auto' or None
If 'auto', class weights will be given inverse proportional
to the frequency of the class in the data.
If a dictionary is given, keys are classes and values
are corresponding class weights.
If None is given, the class weights will be uniform.
classes : ndarray
        Array of the classes occurring in the data, as given by
``np.unique(y_org)`` with ``y_org`` the original class labels.
y_ind : array-like, shape=(n_samples,), dtype=int
Array of class indices per sample;
0 <= y_ind[i] < n_classes for i in range(n_samples).
Returns
-------
class_weight_vect : ndarray, shape=(n_classes,)
Array with class_weight_vect[i] the weight for i-th class
(as determined by sorting).
"""
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight == 'auto':
# inversely proportional to the number of samples in the class
counts = bincount(y_ind, minlength=len(classes))
counts = np.maximum(counts, 1)
weight = 1. / counts
weight *= classes.shape[0] / np.sum(weight)
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
return weight
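# Worked example (illustrative): with classes = np.array([0, 1]) and
# y_ind = np.array([0, 0, 0, 1]), class_weight='auto' gives counts [3, 1],
# raw weights [1/3, 1] and, after rescaling by n_classes / sum(weight),
# approximately [0.5, 1.5] -- the rarer class receives the larger weight.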
| bsd-3-clause |
jiangzhonglian/MachineLearning | src/py2.x/ml/7.AdaBoost/sklearn-adaboost-demo.py | 1 | 1753 | #!/usr/bin/python
# coding:utf8
"""
Created on 2017-07-10
Updated on 2017-07-10
Author: 片刻/Noel Dawe
GitHub: https://github.com/apachecn/AiLearning
sklearn-AdaBoost translated documentation: http://cwiki.apachecn.org/pages/viewpage.action?pageId=10813457
"""
from __future__ import print_function
import matplotlib.pyplot as plt
# importing necessary libraries
import numpy as np
from sklearn import metrics
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
print(__doc__)
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# dataArr, labelArr = loadDataSet("data/7.AdaBoost/horseColicTraining2.txt")
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4), n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
print('y---', type(y[0]), len(y), y[:4])
print('y_1---', type(y_1[0]), len(y_1), y_1[:4])
print('y_2---', type(y_2[0]), len(y_2), y_2[:4])
# 适合2分类
y_true = np.array([0, 0, 1, 1])
y_scores = np.array([0.1, 0.4, 0.35, 0.8])
print('y_scores---', type(y_scores[0]), len(y_scores), y_scores)
print(metrics.roc_auc_score(y_true, y_scores))
# print "-" * 100
# print metrics.roc_auc_score(y[:1], y_2[:1])
| gpl-3.0 |
luanjunyi/cortana | util/__init__.py | 1 | 1818 | import sys, os
import numpy as np
from feat.terms.term_categorize import term_category
from util.log import _logger
TEST_FILE_PATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../data/aggregated/test.dat")
TRAIN_FILE_PATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../data/aggregated/train.dat")
def argmax(ls):
if not ls:
return None, 0.0
return max(ls, key = lambda x: x[1])
def load_data(train_path):
_logger.info("Loading data from %s" % train_path)
X = []
y = []
with open(train_path) as train_file:
for line in train_file:
line = line.strip().decode('utf-8')
if not line:
continue
terms, domain = line.split('\t')
X.append(terms)
y.append(domain)
return np.array(X), np.array(y)
class Tokenizer(object):
def __init__(self):
pass
def __call__(self, sentence):
terms = sentence.strip().split(' ')
ret = [term_category(term) for term in terms]
return list(ret)
class Analyzer(object):
def __init__(self):
from sklearn.feature_extraction.text import TfidfVectorizer
self.tfidf = TfidfVectorizer(min_df = 1, binary = False, ngram_range = (1, 3),
tokenizer = Tokenizer())
self.tokens = self.tfidf.build_tokenizer()
self.ngram = self.tfidf.build_analyzer()
def __call__(self, sentence):
ret = self.ngram(sentence)
terms = self.tokens(sentence)
for term in terms:
cate = term_category(term)
if term != cate:
ret.append(cate)
return ret
__all__ = ["argmax", "load_data", "Tokenizer", "Analyzer", "TEST_FILE_PATH", "TRAIN_FILE_PATH",
"tsv", "conv"]
| mit |
dfdx2/django | django/contrib/gis/geoip2/base.py | 16 | 8969 | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2:
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, str):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Check the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, str):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP2 only takes IP addresses.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
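# Illustrative usage (a sketch; assumes GEOIP_PATH points to a directory that
# contains the GeoLite2 country/city .mmdb files -- the returned values depend
# on the database edition actually installed):
#     g = GeoIP2()
#     g.country('8.8.8.8')            # e.g. {'country_code': 'US', 'country_name': 'United States'}
#     g.lat_lon('djangoproject.com')  # e.g. a (latitude, longitude) tuple from the city database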
| bsd-3-clause |
bramwalet/Subliminal.bundle | Contents/Libraries/Shared/guessit/transfo/guess_date.py | 6 | 2202 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.date import search_date
class GuessDate(Transformer):
def __init__(self):
Transformer.__init__(self, 50)
def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None,
help='If short date is found, consider the first digits as the year.')
naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None,
help='If short date is found, consider the second digits as the day.')
def supported_properties(self):
return ['date']
def guess_date(self, string, node=None, options=None):
date, span = search_date(string, options.get('date_year_first') if options else False, options.get('date_day_first') if options else False)
if date:
return {'date': date}, span
else:
return None, None
def process(self, mtree, options=None):
GuessFinder(self.guess_date, 1.0, self.log, options).process_nodes(mtree.unidentified_leaves())
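# Illustrative behaviour (a sketch): for a node string such as
# "Show.2014.08.31.hdtv", search_date would typically return
# (datetime.date(2014, 8, 31), span), and the transformer then annotates the
# matched span with {'date': datetime.date(2014, 8, 31)}.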
| mit |
anntzer/scikit-learn | sklearn/datasets/_olivetti_faces.py | 12 | 5028 | """Modified Olivetti faces dataset.
The original database was available from (now defunct)
https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
https://cs.nyu.edu/~roweis/
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause
from os.path import exists
from os import makedirs, remove
import numpy as np
from scipy.io import loadmat
import joblib
from . import get_data_home
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ._base import _pkl_filepath
from ._base import load_descr
from ..utils import check_random_state, Bunch
# The original data can be found at:
# https://cs.nyu.edu/~roweis/data/olivettifaces.mat
FACES = RemoteFileMetadata(
filename="olivettifaces.mat",
url="https://ndownloader.figshare.com/files/5976027",
checksum="b612fb967f2dc77c9c62d3e1266e0c73d5fca46a4b8906c18e454d41af987794",
)
def fetch_olivetti_faces(
*,
data_home=None,
shuffle=False,
random_state=0,
download_if_missing=True,
return_X_y=False,
):
"""Load the Olivetti faces data-set from AT&T (classification).
Download it if necessary.
================= =====================
Classes 40
Samples total 400
Dimensionality 4096
Features real, between 0 and 1
================= =====================
Read more in the :ref:`User Guide <olivetti_faces_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : bool, default=False
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
random_state : int, RandomState instance or None, default=0
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
download_if_missing : bool, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns `(data, target)` instead of a `Bunch` object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.22
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data: ndarray, shape (400, 4096)
Each row corresponds to a ravelled
face image of original size 64 x 64 pixels.
images : ndarray, shape (400, 64, 64)
Each row is a face image
corresponding to one of the 40 subjects of the dataset.
target : ndarray, shape (400,)
Labels associated to each face image.
Those labels are ranging from 0-39 and correspond to the
Subject IDs.
DESCR : str
Description of the modified Olivetti Faces Dataset.
(data, target) : tuple if `return_X_y=True`
Tuple with the `data` and `target` objects described above.
.. versionadded:: 0.22
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, "olivetti.pkz")
if not exists(filepath):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
print("downloading Olivetti faces from %s to %s" % (FACES.url, data_home))
mat_path = _fetch_remote(FACES, dirname=data_home)
mfile = loadmat(file_name=mat_path)
# delete raw .mat data
remove(mat_path)
faces = mfile["faces"].T.copy()
joblib.dump(faces, filepath, compress=6)
del mfile
else:
faces = joblib.load(filepath)
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
faces_vectorized = faces.reshape(len(faces), -1)
fdescr = load_descr("olivetti_faces.rst")
if return_X_y:
return faces_vectorized, target
return Bunch(data=faces_vectorized, images=faces, target=target, DESCR=fdescr)
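# Illustrative usage (downloads the .mat file on first call, then caches a
# pickled copy under data_home):
#     faces = fetch_olivetti_faces(shuffle=True, random_state=0)
#     faces.data.shape    # (400, 4096)
#     faces.images.shape  # (400, 64, 64)
#     faces.target.shape  # (400,)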
| bsd-3-clause |
Obus/scikit-learn | sklearn/datasets/base.py | 195 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored a two levels folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
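# Illustrative usage (a sketch; assumes a container folder laid out as in the
# docstring above, with one subfolder per category):
#     bunch = load_files('/path/to/container_folder', encoding='utf-8')
#     len(bunch.data) == len(bunch.target)   # one entry per text file
#     bunch.target_names                     # sorted subfolder names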
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
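# Usage sketch: the Bunch returned above exposes the 442 x 10 data matrix
# and the 442 regression targets described in the docstring.
#
# >>> diabetes = load_diabetes()
# >>> diabetes.data.shape
# (442, 10)
# >>> diabetes.target.shape
# (442,)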
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
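# Usage sketch: both blocks are 20-row, 3-column arrays, with 'data' holding
# the exercise variables and 'target' the physiological measurements.
#
# >>> linnerud = load_linnerud()
# >>> linnerud.data.shape, linnerud.target.shape
# ((20, 3), (20, 3))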
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
ESS-LLP/erpnext-medical | erpnext/manufacturing/report/work_order_summary/work_order_summary.py | 3 | 6054 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import date_diff, today, getdate, flt
from frappe import _
from erpnext.stock.report.stock_analytics.stock_analytics import (get_period_date_ranges, get_period)
def execute(filters=None):
columns, data = [], []
if not filters.get("age"):
filters["age"] = 0
data = get_data(filters)
columns = get_columns(filters)
chart_data = get_chart_data(data, filters)
return columns, data, None, chart_data
def get_data(filters):
query_filters = {"docstatus": 1}
fields = ["name", "status", "sales_order", "production_item", "qty", "produced_qty",
"planned_start_date", "planned_end_date", "actual_start_date", "actual_end_date", "lead_time"]
for field in ["sales_order", "production_item", "status", "company"]:
if filters.get(field):
query_filters[field] = ("in", filters.get(field))
query_filters["planned_start_date"] = (">=", filters.get("from_date"))
query_filters["planned_end_date"] = ("<=", filters.get("to_date"))
data = frappe.get_all("Work Order",
fields= fields, filters=query_filters, order_by="planned_start_date asc")
res = []
for d in data:
start_date = d.actual_start_date or d.planned_start_date
d.age = 0
if d.status != 'Completed':
d.age = date_diff(today(), start_date)
if filters.get("age") <= d.age:
res.append(d)
return res
def get_chart_data(data, filters):
if filters.get("charts_based_on") == "Status":
return get_chart_based_on_status(data)
elif filters.get("charts_based_on") == "Age":
return get_chart_based_on_age(data)
else:
return get_chart_based_on_qty(data, filters)
def get_chart_based_on_status(data):
labels = ["Completed", "In Process", "Stopped", "Not Started"]
status_wise_data = {
"Not Started": 0,
"In Process": 0,
"Stopped": 0,
"Completed": 0
}
for d in data:
status_wise_data[d.status] += 1
values = [status_wise_data["Completed"], status_wise_data["In Process"],
status_wise_data["Stopped"], status_wise_data["Not Started"]]
chart = {
"data": {
'labels': labels,
'datasets': [{'name':'Qty Wise Chart', 'values': values}]
},
"type": "donut",
"height": 300
}
return chart
def get_chart_based_on_age(data):
labels = ["0-30 Days", "30-60 Days", "60-90 Days", "90 Above"]
age_wise_data = {
"0-30 Days": 0,
"30-60 Days": 0,
"60-90 Days": 0,
"90 Above": 0
}
for d in data:
if d.age > 0 and d.age <= 30:
age_wise_data["0-30 Days"] += 1
elif d.age > 30 and d.age <= 60:
age_wise_data["30-60 Days"] += 1
elif d.age > 60 and d.age <= 90:
age_wise_data["60-90 Days"] += 1
else:
age_wise_data["90 Above"] += 1
values = [age_wise_data["0-30 Days"], age_wise_data["30-60 Days"],
age_wise_data["60-90 Days"], age_wise_data["90 Above"]]
chart = {
"data": {
'labels': labels,
'datasets': [{'name':'Qty Wise Chart', 'values': values}]
},
"type": "donut",
"height": 300
}
return chart
def get_chart_based_on_qty(data, filters):
labels, periodic_data = prepare_chart_data(data, filters)
pending, completed = [], []
datasets = []
for d in labels:
pending.append(periodic_data.get("Pending").get(d))
completed.append(periodic_data.get("Completed").get(d))
datasets.append({"name": "Pending", "values": pending})
datasets.append({"name": "Completed", "values": completed})
chart = {
"data": {
'labels': labels,
'datasets': datasets
},
"type": "bar",
"barOptions": {
"stacked": 1
}
}
return chart
def prepare_chart_data(data, filters):
labels = []
periodic_data = {
"Pending": {},
"Completed": {}
}
filters.range = "Monthly"
ranges = get_period_date_ranges(filters)
for from_date, end_date in ranges:
period = get_period(end_date, filters)
if period not in labels:
labels.append(period)
if period not in periodic_data["Pending"]:
periodic_data["Pending"][period] = 0
if period not in periodic_data["Completed"]:
periodic_data["Completed"][period] = 0
for d in data:
if getdate(d.planned_start_date) >= from_date and getdate(d.planned_start_date) <= end_date:
periodic_data["Pending"][period] += (flt(d.qty) - flt(d.produced_qty))
periodic_data["Completed"][period] += flt(d.produced_qty)
return labels, periodic_data
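# Illustrative shape of what prepare_chart_data() returns; the period labels
# come from get_period() and are shown here only as placeholders.
#
# labels = ["Jan", "Feb", "Mar"]
# periodic_data = {
#     "Pending": {"Jan": 10.0, "Feb": 4.0, "Mar": 0.0},
#     "Completed": {"Jan": 25.0, "Feb": 30.0, "Mar": 12.0},
# }
#
# get_chart_based_on_qty() above consumes these two structures to build the
# stacked "Pending" / "Completed" bar datasets.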
def get_columns(filters):
columns = [
{
"label": _("Id"),
"fieldname": "name",
"fieldtype": "Link",
"options": "Work Order",
"width": 100
},
]
if not filters.get("status"):
columns.append(
{
"label": _("Status"),
"fieldname": "status",
"width": 100
},
)
columns.extend([
{
"label": _("Production Item"),
"fieldname": "production_item",
"fieldtype": "Link",
"options": "Item",
"width": 130
},
{
"label": _("Produce Qty"),
"fieldname": "qty",
"fieldtype": "Float",
"width": 110
},
{
"label": _("Produced Qty"),
"fieldname": "produced_qty",
"fieldtype": "Float",
"width": 110
},
{
"label": _("Sales Order"),
"fieldname": "sales_order",
"fieldtype": "Link",
"options": "Sales Order",
"width": 90
},
{
"label": _("Planned Start Date"),
"fieldname": "planned_start_date",
"fieldtype": "Date",
"width": 150
},
{
"label": _("Planned End Date"),
"fieldname": "planned_end_date",
"fieldtype": "Date",
"width": 150
}
])
if filters.get("status") != 'Not Started':
columns.extend([
{
"label": _("Actual Start Date"),
"fieldname": "actual_start_date",
"fieldtype": "Date",
"width": 100
},
{
"label": _("Actual End Date"),
"fieldname": "actual_end_date",
"fieldtype": "Date",
"width": 100
},
{
"label": _("Age"),
"fieldname": "age",
"fieldtype": "Float",
"width": 110
},
])
if filters.get("status") == 'Completed':
columns.extend([
{
"label": _("Lead Time (in mins)"),
"fieldname": "lead_time",
"fieldtype": "Float",
"width": 110
},
])
return columns | gpl-3.0 |
mcanthony/nupic | tests/swarming/nupic/swarming/experiments/input_predicted_field/description.py | 32 | 14107 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer
)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'consumption', 'sum'),
],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# 'encoders': {'field1': {'fieldname': 'field1', 'n':100,
# 'name': 'field1', 'type': 'AdaptiveScalarEncoder',
# 'w': 21}}
#
'encoders': {
'consumption': {
'clipInput': True,
'fieldname': u'consumption',
'n': 100,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'address': {
'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21},
'gym': {
'fieldname': u'gym',
'n': 100,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21},
'timestamp_dayOfWeek': {
'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'},
'_classifierInput': {
'name': u'_classifierInput',
'fieldname': u'consumption',
'classifierOnly': True,
'type': 'AdaptiveScalarEncoder',
'clipInput': True,
'n': 100,
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : { u'days': 0, u'hours': 0},
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : {
u'info': u'test_hotgym',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'consumption', u'predictionSteps': [1]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'window': 1000, 'steps': [1], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
rsivapr/scikit-learn | sklearn/feature_extraction/image.py | 7 | 16121 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils.fixes import in1d
from ..utils import array2d, check_random_state
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(in1d(edges[0], inds),
in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.todense()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Parameters
===========
img: ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
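# A small usage sketch: for a 2x2 image the pixel-to-pixel gradient graph is
# a 4x4 sparse matrix whose diagonal carries the pixel values themselves.
#
# >>> img = np.array([[0., 1.], [2., 3.]])
# >>> graph = img_to_graph(img)
# >>> graph.shape
# (4, 4)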
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
===========
n_x: int
Dimension in x axis
n_y: int
Dimension in y axis
n_z: int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: dtype, optional, default int
The data of the returned sparse matrix. By default it is int
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Parameters
===========
i_h: int
The image height
i_w: int
        The image width
p_h: int
The height of a patch
p_w: int
The width of a patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
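# Sketch of the patch-count logic above: a 4x4 image and 2x2 patches give
# (4 - 2 + 1) * (4 - 2 + 1) = 9 patches, and a float max_patches is read as
# a fraction of that total.
#
# >>> _compute_n_patches(4, 4, 2, 2)
# 9
# >>> _compute_n_patches(4, 4, 2, 2, max_patches=0.5)
# 4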
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Parameters
----------
arr: ndarray
n-dimensional array of which patches are to be extracted
patch_shape: integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step: integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches: strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) /
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
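# Sketch of the strided view returned above, plus the copying reshape
# mentioned in the docstring.
#
# >>> arr = np.arange(16).reshape(4, 4)
# >>> patches = extract_patches(arr, patch_shape=2, extraction_step=1)
# >>> patches.shape
# (3, 3, 2, 2)
# >>> patches.reshape(-1, 2, 2).shape
# (9, 2, 2)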
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Parameters
----------
image: array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
image = array2d(image)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Parameters
----------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size: tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image: array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
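# Round-trip sketch: extracting every overlapping patch of an image and
# averaging them back reproduces the original image exactly.
#
# >>> one_image = np.arange(16).reshape(4, 4).astype(float)
# >>> patches = extract_patches_2d(one_image, (2, 2))
# >>> rebuilt = reconstruct_from_patches_2d(patches, (4, 4))
# >>> np.allclose(one_image, rebuilt)
# True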
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Parameters
----------
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h / 10, i_w / 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
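# Usage sketch for PatchExtractor: drawing four random 8x8 patches from each
# of five single-channel 32x32 images (values below are illustrative).
#
# >>> X = np.random.RandomState(0).rand(5, 32, 32)
# >>> extractor = PatchExtractor(patch_size=(8, 8), max_patches=4,
# ...                            random_state=0)
# >>> extractor.transform(X).shape
# (20, 8, 8)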
| bsd-3-clause |
Obus/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 77 | 4510 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
DeepGnosis/keras | examples/mnist_transfer_cnn.py | 4 | 3801 | '''Transfer learning toy example:
1- Train a simple convnet on the MNIST dataset the first 5 digits [0..4].
2- Freeze convolutional layers and fine-tune dense layers
for the classification of digits [5..9].
Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python mnist_transfer_cnn.py
Get to 99.8% test accuracy after 5 epochs
for the first five digits classifier
and 99.2% for the last five digits after transfer + fine-tuning.
'''
from __future__ import print_function
import numpy as np
import datetime
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
now = datetime.datetime.now
batch_size = 128
nb_classes = 5
nb_epoch = 5
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
def train_model(model, train, test, nb_classes):
X_train = train[0].reshape(train[0].shape[0], 1, img_rows, img_cols)
X_test = test[0].reshape(test[0].shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(train[1], nb_classes)
Y_test = np_utils.to_categorical(test[1], nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
t = now()
model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1,
validation_data=(X_test, Y_test))
print('Training time: %s' % (now() - t))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# create two datasets one with digits below 5 and one with 5 and above
X_train_lt5 = X_train[y_train < 5]
y_train_lt5 = y_train[y_train < 5]
X_test_lt5 = X_test[y_test < 5]
y_test_lt5 = y_test[y_test < 5]
X_train_gte5 = X_train[y_train >= 5]
y_train_gte5 = y_train[y_train >= 5] - 5 # make classes start at 0 for
X_test_gte5 = X_test[y_test >= 5] # np_utils.to_categorical
y_test_gte5 = y_test[y_test >= 5] - 5
# define two groups of layers: feature (convolutions) and classification (dense)
feature_layers = [
Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',
input_shape=(1, img_rows, img_cols)),
Activation('relu'),
Convolution2D(nb_filters, nb_conv, nb_conv),
Activation('relu'),
MaxPooling2D(pool_size=(nb_pool, nb_pool)),
Dropout(0.25),
Flatten(),
]
classification_layers = [
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(nb_classes),
Activation('softmax')
]
# create complete model
model = Sequential()
for l in feature_layers + classification_layers:
model.add(l)
# train model for 5-digit classification [0..4]
train_model(model,
(X_train_lt5, y_train_lt5),
(X_test_lt5, y_test_lt5), nb_classes)
# freeze feature layers and rebuild model
for l in feature_layers:
l.trainable = False
# transfer: train dense layers for new classification task [5..9]
train_model(model,
(X_train_gte5, y_train_gte5),
(X_test_gte5, y_test_gte5), nb_classes)
| mit |
dsbrown1331/CoRL2019-DREX | drex-atari/train.py | 1 | 4337 | import os
from bc import Imitator
import numpy as np
from dataset import Example, Dataset
import utils
#from ale_wrapper import ALEInterfaceWrapper
from evaluator import Evaluator
from pdb import set_trace
import matplotlib.pyplot as plt
#try bmh
plt.style.use('bmh')
def smooth(losses, run=10):
new_losses = []
for i in range(len(losses)):
        new_losses.append(np.mean(losses[max(0, i - run):i+1]))
return new_losses
def plot(losses, checkpoint_dir, env_name):
print("Plotting losses to ", os.path.join(checkpoint_dir, env_name + "_loss.png"))
    plt.plot(smooth(losses, 25), label='loss')
plt.xlabel("Update")
plt.ylabel("Loss")
plt.legend(loc='lower center')
    plt.savefig(os.path.join(checkpoint_dir, env_name + "_loss.png"))
def train(env_name,
minimal_action_set,
learning_rate,
alpha,
l2_penalty,
minibatch_size,
hist_len,
discount,
checkpoint_dir,
updates,
dataset,
validation_dataset,
num_eval_episodes,
epsilon_greedy,
extra_info):
import tracemalloc
# create DQN agent
agent = Imitator(list(minimal_action_set),
learning_rate,
alpha,
checkpoint_dir,
hist_len,
l2_penalty)
print("Beginning training...")
log_frequency = 500
log_num = log_frequency
update = 1
running_loss = 0.
best_v_loss = np.float('inf')
count = 0
while update < updates:
# snapshot = tracemalloc.take_snapshot()
# top_stats = snapshot.statistics('lineno')
# import gc
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# print(type(obj), obj.size())
# except:
# pass
#
# print("[ Top 10 ]")
# for stat in top_stats[:10]:
# print(stat)
if update > log_num:
print(str(update) + " updates completed. Loss {}".format(running_loss / log_frequency))
log_num += log_frequency
running_loss = 0
#run validation loss test
v_loss = agent.validate(validation_dataset, 10)
print("Validation accuracy = {}".format(v_loss / validation_dataset.size))
if v_loss > best_v_loss:
count += 1
if count > 5:
print("validation not improing for {} steps. Stopping to prevent overfitting".format(count))
break
else:
best_v_loss = v_loss
print("updating best vloss", best_v_loss)
count = 0
l = agent.train(dataset, minibatch_size)
running_loss += l
update += 1
print("Training completed.")
agent.checkpoint_network(env_name, extra_info)
#Plot losses
#Evaluation
print("beginning evaluation")
evaluator = Evaluator(env_name, num_eval_episodes, checkpoint_dir, epsilon_greedy)
evaluator.evaluate(agent)
return agent
def train_transitions(env_name,
minimal_action_set,
learning_rate,
alpha,
l2_penalty,
minibatch_size,
hist_len,
discount,
checkpoint_dir,
updates,
dataset,
num_eval_episodes):
# create DQN agent
agent = Imitator(list(minimal_action_set),
learning_rate,
alpha,
checkpoint_dir,
hist_len,
l2_penalty)
print("Beginning training...")
log_frequency = 1000
log_num = log_frequency
update = 1
running_loss = 0.
while update < updates:
if update > log_num:
print(str(update) + " updates completed. Loss {}".format(running_loss / log_frequency))
log_num += log_frequency
running_loss = 0
l = agent.train(dataset, minibatch_size)
running_loss += l
update += 1
print("Training completed.")
agent.checkpoint_network(env_name + "_transitions")
#calculate accuacy
#Evaluation
#evaluator = Evaluator(env_name, num_eval_episodes)
#evaluator.evaluate(agent)
return agent
if __name__ == '__main__':
train()
| mit |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/qda.py | 5 | 7200 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_arrays, array2d, column_or_1d
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
`covariances_` : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
`means_` : array-like, shape = [n_classes, n_features]
Class means.
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1).
`rotations_` : list of arrays
For each class an array of shape [n_samples, n_samples], the
rotation of the Gaussian distribution, i.e. its principal axis.
`scalings_` : array-like, shape = [n_classes, n_features]
Contains the scaling of the Gaussian
distributions along the principal axes for each
class, i.e. the variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
"""
X, y = check_arrays(X, y)
y = column_or_1d(y, warn=True)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = np.asarray(scalings)
self.rotations_ = rotations
return self
def _decision_function(self, X):
X = array2d(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
return (-0.5 * (norm2 + np.sum(np.log(self.scalings_), 1))
+ np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
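# A small sketch on the toy data from the class docstring: with two classes,
# decision_function returns a single log-likelihood ratio per sample.
#
# >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
# >>> y = np.array([1, 1, 1, 2, 2, 2])
# >>> clf = QDA().fit(X, y)
# >>> clf.decision_function([[-0.8, -1]]).shape
# (1,)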
| apache-2.0 |
scottpurdy/nupic | tests/swarming/nupic/swarming/experiments/dummy_multi_v2/description.py | 10 | 15387 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'mean'),
(u'address', 'first')],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'address': { 'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21},
'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 200,
'minval': 0,
'n': 1500,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21},
'gym': { 'fieldname': u'gym',
'n': 300,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
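# Worked example (editor's illustration, not from the original template): if
# predictAheadTime were dict(hours=1) and the aggregation period above were
# dict(minutes=15), aggregationDivide would return 60 / 15 = 4.0, so the
# classifier would be configured with steps = '4', i.e. it predicts four
# aggregated records ahead.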
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : {u'info': u'test_NoProviders',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption',
inferenceElement=InferenceElement.prediction,
metric='rmse'),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': [".*nupicScore.*"],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
deepmind/dm_nevis | dm_nevis/datasets_storage/handlers/mnist_m.py | 1 | 3517 | # Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST-m handler."""
import os
import re
import tarfile
from dm_nevis.datasets_storage.handlers import extraction_utils as eu
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from PIL import Image
_DATA_FNAME = 'mnist_m.tar.gz'
_TRAIN_LABELS_FNAME = 'mnist_m/mnist_m_train_labels.txt'
_TEST_LABELS_FNAME = 'mnist_m/mnist_m_test_labels.txt'
_FNAME_AND_LABEL_REGEX = r'([\d]+\.png) ([\d]+)'
def _parse_labels(labels_fname, tf):
"""Parses the labels and filenames for given label_fname from a tarfile."""
read_buffer = tf.extractfile(labels_fname)
if read_buffer is None:
raise ValueError(f'Failed to read {labels_fname}')
fname_to_label_list = read_buffer.read().decode('utf-8').split('\n')
parsed_labels = dict()
for fname_to_label in fname_to_label_list:
if not fname_to_label:
continue
regex_match = re.search(_FNAME_AND_LABEL_REGEX, fname_to_label)
if regex_match is None:
raise ValueError('Regex match returned None result.')
fname, label = regex_match.groups()
label = int(label)
parsed_labels[fname] = label
return parsed_labels
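# Illustrative example (editor's addition): a label-file line such as
# '00000042.png 7' matches _FNAME_AND_LABEL_REGEX, so _parse_labels stores
# parsed_labels['00000042.png'] = 7.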
def mnist_m_handler(dataset_path: str) -> types.HandlerOutput:
"""Handler for MNIST-m dataset."""
with tarfile.open(os.path.join(dataset_path, _DATA_FNAME)) as tf:
train_fname_labels = _parse_labels(_TRAIN_LABELS_FNAME, tf)
test_fname_labels = _parse_labels(_TEST_LABELS_FNAME, tf)
def gen(fname_to_labels):
with tarfile.open(os.path.join(dataset_path, _DATA_FNAME), 'r:gz') as tf:
for member in tf.getmembers():
image_fname = os.path.basename(member.path)
if image_fname not in fname_to_labels:
continue
image = Image.open(tf.extractfile(member))
image.load()
label = fname_to_labels[image_fname]
yield (image, label)
metadata = types.DatasetMetaData(
num_classes=10,
num_channels=3,
image_shape=(), # Ignored for now.
additional_metadata=dict(
task_type='classification',
image_type='ocr',
))
# TODO: Make more efficient deduplication algorithm.
merged_fname_labels = train_fname_labels
merged_fname_labels.update(test_fname_labels)
make_gen_fn = eu.deduplicate_data_generator(gen(merged_fname_labels))
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
mnist_m_dataset = types.DownloadableDataset(
name='mnist_m',
download_urls=[
types.DownloadableArtefact(
url='https://drive.google.com/uc?export=download&id=0B_tExHiYS-0veklUZHFYT19KYjg&confirm=t',
checksum='859df31c91afe82e80e5012ba928f279')
],
website_url='http://yaroslav.ganin.net/',
handler=mnist_m_handler)
| apache-2.0 |
shahankhatch/scikit-learn | examples/exercises/plot_iris_exercise.py | 320 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float64)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
mfjb/scikit-learn | examples/exercises/plot_iris_exercise.py | 320 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float64)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
googleapis/python-datacatalog | google/cloud/datacatalog_v1beta1/services/data_catalog/async_client.py | 1 | 157405 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core.client_options import ClientOptions
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.cloud.datacatalog_v1beta1.services.data_catalog import pagers
from google.cloud.datacatalog_v1beta1.types import (
common,
datacatalog,
gcs_fileset_spec,
schema,
search,
table_spec,
tags,
timestamps,
)
from .client import DataCatalogClient
from .transports.base import DEFAULT_CLIENT_INFO, DataCatalogTransport
from .transports.grpc_asyncio import DataCatalogGrpcAsyncIOTransport
class DataCatalogAsyncClient:
"""Data Catalog API service allows clients to discover,
understand, and manage their data.
"""
_client: DataCatalogClient
DEFAULT_ENDPOINT = DataCatalogClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DataCatalogClient.DEFAULT_MTLS_ENDPOINT
entry_path = staticmethod(DataCatalogClient.entry_path)
parse_entry_path = staticmethod(DataCatalogClient.parse_entry_path)
entry_group_path = staticmethod(DataCatalogClient.entry_group_path)
parse_entry_group_path = staticmethod(DataCatalogClient.parse_entry_group_path)
tag_path = staticmethod(DataCatalogClient.tag_path)
parse_tag_path = staticmethod(DataCatalogClient.parse_tag_path)
tag_template_path = staticmethod(DataCatalogClient.tag_template_path)
parse_tag_template_path = staticmethod(DataCatalogClient.parse_tag_template_path)
tag_template_field_path = staticmethod(DataCatalogClient.tag_template_field_path)
parse_tag_template_field_path = staticmethod(
DataCatalogClient.parse_tag_template_field_path
)
common_billing_account_path = staticmethod(
DataCatalogClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
DataCatalogClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(DataCatalogClient.common_folder_path)
parse_common_folder_path = staticmethod(DataCatalogClient.parse_common_folder_path)
common_organization_path = staticmethod(DataCatalogClient.common_organization_path)
parse_common_organization_path = staticmethod(
DataCatalogClient.parse_common_organization_path
)
common_project_path = staticmethod(DataCatalogClient.common_project_path)
parse_common_project_path = staticmethod(
DataCatalogClient.parse_common_project_path
)
common_location_path = staticmethod(DataCatalogClient.common_location_path)
parse_common_location_path = staticmethod(
DataCatalogClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataCatalogAsyncClient: The constructed client.
"""
return DataCatalogClient.from_service_account_info.__func__(DataCatalogAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataCatalogAsyncClient: The constructed client.
"""
return DataCatalogClient.from_service_account_file.__func__(DataCatalogAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
        otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return DataCatalogClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
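    # Hedged usage sketch (editor's addition, not part of the generated
    # client): how a caller might consume the (endpoint, cert_source) tuple
    # returned above. Variable names are illustrative only.
    #
    #   from google.api_core.client_options import ClientOptions
    #
    #   options = ClientOptions()
    #   endpoint, cert_source = (
    #       DataCatalogAsyncClient.get_mtls_endpoint_and_cert_source(options))
    #   # `endpoint` is the regular or mTLS endpoint depending on the
    #   # GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    #   # environment variables; `cert_source` is None when no client
    #   # certificate should be used.
    #   client = DataCatalogAsyncClient(
    #       client_options=ClientOptions(api_endpoint=endpoint))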
@property
def transport(self) -> DataCatalogTransport:
"""Returns the transport used by the client instance.
Returns:
DataCatalogTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(DataCatalogClient).get_transport_class, type(DataCatalogClient)
)
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DataCatalogTransport] = "grpc_asyncio",
client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the data catalog client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DataCatalogTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = DataCatalogClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def search_catalog(
self,
request: Optional[Union[datacatalog.SearchCatalogRequest, dict]] = None,
*,
scope: Optional[datacatalog.SearchCatalogRequest.Scope] = None,
query: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchCatalogAsyncPager:
r"""Searches Data Catalog for multiple resources like entries, tags
that match a query.
This is a custom method
(https://cloud.google.com/apis/design/custom_methods) and does
not return the complete resource, only the resource identifier
        and high-level fields. Clients can subsequently call ``Get``
methods.
Note that Data Catalog search queries do not guarantee full
recall. Query results that match your query may not be returned,
even in subsequent result pages. Also note that results returned
(and not returned) can vary across repeated search queries.
See `Data Catalog Search
Syntax <https://cloud.google.com/data-catalog/docs/how-to/search-reference>`__
for more information.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_search_catalog():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.SearchCatalogRequest(
query="query_value",
)
# Make the request
page_result = client.search_catalog(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.SearchCatalogRequest, dict]]):
The request object. Request message for
[SearchCatalog][google.cloud.datacatalog.v1beta1.DataCatalog.SearchCatalog].
scope (:class:`google.cloud.datacatalog_v1beta1.types.SearchCatalogRequest.Scope`):
Required. The scope of this search request. A ``scope``
that has empty ``include_org_ids``,
``include_project_ids`` AND false
``include_gcp_public_datasets`` is considered invalid.
Data Catalog will return an error in such a case.
This corresponds to the ``scope`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
query (:class:`str`):
Required. The query string in search query syntax. The
query must be non-empty.
                Query strings can be as simple as "x" or more qualified as:
- name:x
- column:x
- description:y
Note: Query tokens need to have a minimum of 3
characters for substring matching to work correctly. See
`Data Catalog Search
Syntax <https://cloud.google.com/data-catalog/docs/how-to/search-reference>`__
for more information.
This corresponds to the ``query`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.services.data_catalog.pagers.SearchCatalogAsyncPager:
Response message for
[SearchCatalog][google.cloud.datacatalog.v1beta1.DataCatalog.SearchCatalog].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([scope, query])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.SearchCatalogRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if scope is not None:
request.scope = scope
if query is not None:
request.query = query
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.search_catalog,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.SearchCatalogAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def create_entry_group(
self,
request: Optional[Union[datacatalog.CreateEntryGroupRequest, dict]] = None,
*,
parent: Optional[str] = None,
entry_group_id: Optional[str] = None,
entry_group: Optional[datacatalog.EntryGroup] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datacatalog.EntryGroup:
r"""A maximum of 10,000 entry groups may be created per organization
across all locations.
Users should enable the Data Catalog API in the project
identified by the ``parent`` parameter (see [Data Catalog
Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_create_entry_group():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.CreateEntryGroupRequest(
parent="parent_value",
entry_group_id="entry_group_id_value",
)
# Make the request
response = await client.create_entry_group(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.CreateEntryGroupRequest, dict]]):
The request object. Request message for
[CreateEntryGroup][google.cloud.datacatalog.v1beta1.DataCatalog.CreateEntryGroup].
parent (:class:`str`):
Required. The name of the project this entry group is
in. Example:
- projects/{project_id}/locations/{location}
Note that this EntryGroup and its child resources may
not actually be stored in the location in this name.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entry_group_id (:class:`str`):
Required. The id of the entry group
to create. The id must begin with a
letter or underscore, contain only
English letters, numbers and
underscores, and be at most 64
characters.
This corresponds to the ``entry_group_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entry_group (:class:`google.cloud.datacatalog_v1beta1.types.EntryGroup`):
The entry group to create. Defaults
to an empty entry group.
This corresponds to the ``entry_group`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.EntryGroup:
EntryGroup Metadata.
An EntryGroup resource represents a logical grouping
of zero or more Data Catalog
[Entry][google.cloud.datacatalog.v1beta1.Entry]
resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entry_group_id, entry_group])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.CreateEntryGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if entry_group_id is not None:
request.entry_group_id = entry_group_id
if entry_group is not None:
request.entry_group = entry_group
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_entry_group,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
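        # Editor's note (illustrative): the routing helper above turns the
        # parent field into an "x-goog-request-params" metadata entry, e.g.
        # roughly ("x-goog-request-params",
        # "parent=projects/p1/locations/us-central1"), which the backend uses
        # to route the request.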
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_entry_group(
self,
request: Optional[Union[datacatalog.UpdateEntryGroupRequest, dict]] = None,
*,
entry_group: Optional[datacatalog.EntryGroup] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datacatalog.EntryGroup:
r"""Updates an EntryGroup. The user should enable the Data Catalog
API in the project identified by the ``entry_group.name``
parameter (see [Data Catalog Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_update_entry_group():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.UpdateEntryGroupRequest(
)
# Make the request
response = await client.update_entry_group(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.UpdateEntryGroupRequest, dict]]):
The request object. Request message for
[UpdateEntryGroup][google.cloud.datacatalog.v1beta1.DataCatalog.UpdateEntryGroup].
entry_group (:class:`google.cloud.datacatalog_v1beta1.types.EntryGroup`):
Required. The updated entry group.
"name" field must be set.
This corresponds to the ``entry_group`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The fields to update on the entry
group. If absent or empty, all
modifiable fields are updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.EntryGroup:
EntryGroup Metadata.
An EntryGroup resource represents a logical grouping
of zero or more Data Catalog
[Entry][google.cloud.datacatalog.v1beta1.Entry]
resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([entry_group, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.UpdateEntryGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if entry_group is not None:
request.entry_group = entry_group
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_entry_group,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("entry_group.name", request.entry_group.name),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def get_entry_group(
self,
request: Optional[Union[datacatalog.GetEntryGroupRequest, dict]] = None,
*,
name: Optional[str] = None,
read_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datacatalog.EntryGroup:
r"""Gets an EntryGroup.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_get_entry_group():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.GetEntryGroupRequest(
name="name_value",
)
# Make the request
response = await client.get_entry_group(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.GetEntryGroupRequest, dict]]):
The request object. Request message for
[GetEntryGroup][google.cloud.datacatalog.v1beta1.DataCatalog.GetEntryGroup].
name (:class:`str`):
Required. The name of the entry group. For example,
``projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
read_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The fields to return. If not set or
empty, all fields are returned.
This corresponds to the ``read_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.EntryGroup:
EntryGroup Metadata.
An EntryGroup resource represents a logical grouping
of zero or more Data Catalog
[Entry][google.cloud.datacatalog.v1beta1.Entry]
resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, read_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.GetEntryGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if read_mask is not None:
request.read_mask = read_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_entry_group,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_entry_group(
self,
request: Optional[Union[datacatalog.DeleteEntryGroupRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an EntryGroup. Only entry groups that do not contain
entries can be deleted. Users should enable the Data Catalog API
in the project identified by the ``name`` parameter (see [Data
Catalog Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_delete_entry_group():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.DeleteEntryGroupRequest(
name="name_value",
)
# Make the request
await client.delete_entry_group(request=request)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.DeleteEntryGroupRequest, dict]]):
The request object. Request message for
[DeleteEntryGroup][google.cloud.datacatalog.v1beta1.DataCatalog.DeleteEntryGroup].
name (:class:`str`):
Required. The name of the entry group. For example,
``projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.DeleteEntryGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_entry_group,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def list_entry_groups(
self,
request: Optional[Union[datacatalog.ListEntryGroupsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEntryGroupsAsyncPager:
r"""Lists entry groups.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_list_entry_groups():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.ListEntryGroupsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_entry_groups(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.ListEntryGroupsRequest, dict]]):
The request object. Request message for
[ListEntryGroups][google.cloud.datacatalog.v1beta1.DataCatalog.ListEntryGroups].
parent (:class:`str`):
Required. The name of the location that contains the
entry groups, which can be provided in URL format.
Example:
- projects/{project_id}/locations/{location}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.services.data_catalog.pagers.ListEntryGroupsAsyncPager:
Response message for
[ListEntryGroups][google.cloud.datacatalog.v1beta1.DataCatalog.ListEntryGroups].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.ListEntryGroupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_entry_groups,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEntryGroupsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def create_entry(
self,
request: Optional[Union[datacatalog.CreateEntryRequest, dict]] = None,
*,
parent: Optional[str] = None,
entry_id: Optional[str] = None,
entry: Optional[datacatalog.Entry] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datacatalog.Entry:
r"""Creates an entry. Only entries of 'FILESET' type or
user-specified type can be created.
Users should enable the Data Catalog API in the project
identified by the ``parent`` parameter (see [Data Catalog
Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
A maximum of 100,000 entries may be created per entry group.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_create_entry():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
entry = datacatalog_v1beta1.Entry()
entry.type_ = "FILESET"
entry.integrated_system = "CLOUD_PUBSUB"
entry.gcs_fileset_spec.file_patterns = ['file_patterns_value1', 'file_patterns_value2']
request = datacatalog_v1beta1.CreateEntryRequest(
parent="parent_value",
entry_id="entry_id_value",
entry=entry,
)
# Make the request
response = await client.create_entry(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.CreateEntryRequest, dict]]):
The request object. Request message for
[CreateEntry][google.cloud.datacatalog.v1beta1.DataCatalog.CreateEntry].
parent (:class:`str`):
Required. The name of the entry group this entry is in.
Example:
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
Note that this Entry and its child resources may not
actually be stored in the location in this name.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entry_id (:class:`str`):
Required. The id of the entry to
create.
This corresponds to the ``entry_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entry (:class:`google.cloud.datacatalog_v1beta1.types.Entry`):
Required. The entry to create.
This corresponds to the ``entry`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.Entry:
Entry Metadata.
A Data Catalog Entry resource represents another
resource in Google Cloud Platform (such as a BigQuery
dataset or a Pub/Sub topic), or outside of Google
Cloud Platform. Clients can use the linked_resource
field in the Entry resource to refer to the original
resource ID of the source system.
An Entry resource contains resource details, such as
its schema. An Entry can also be used to attach
flexible metadata, such as a
[Tag][google.cloud.datacatalog.v1beta1.Tag].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, entry_id, entry])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.CreateEntryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if entry_id is not None:
request.entry_id = entry_id
if entry is not None:
request.entry = entry
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_entry,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_entry(
self,
request: Optional[Union[datacatalog.UpdateEntryRequest, dict]] = None,
*,
entry: Optional[datacatalog.Entry] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datacatalog.Entry:
r"""Updates an existing entry. Users should enable the Data Catalog
API in the project identified by the ``entry.name`` parameter
(see [Data Catalog Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_update_entry():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
entry = datacatalog_v1beta1.Entry()
entry.type_ = "FILESET"
entry.integrated_system = "CLOUD_PUBSUB"
entry.gcs_fileset_spec.file_patterns = ['file_patterns_value1', 'file_patterns_value2']
request = datacatalog_v1beta1.UpdateEntryRequest(
entry=entry,
)
# Make the request
response = await client.update_entry(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.UpdateEntryRequest, dict]]):
The request object. Request message for
[UpdateEntry][google.cloud.datacatalog.v1beta1.DataCatalog.UpdateEntry].
entry (:class:`google.cloud.datacatalog_v1beta1.types.Entry`):
Required. The updated entry. The
"name" field must be set.
This corresponds to the ``entry`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The fields to update on the entry. If absent or empty,
all modifiable fields are updated.
The following fields are modifiable:
- For entries with type ``DATA_STREAM``:
- ``schema``
- For entries with type ``FILESET``
- ``schema``
- ``display_name``
- ``description``
- ``gcs_fileset_spec``
- ``gcs_fileset_spec.file_patterns``
- For entries with ``user_specified_type``
- ``schema``
- ``display_name``
- ``description``
- user_specified_type
- user_specified_system
- linked_resource
- source_system_timestamps
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.Entry:
Entry Metadata.
A Data Catalog Entry resource represents another
resource in Google Cloud Platform (such as a BigQuery
dataset or a Pub/Sub topic), or outside of Google
Cloud Platform. Clients can use the linked_resource
field in the Entry resource to refer to the original
resource ID of the source system.
An Entry resource contains resource details, such as
its schema. An Entry can also be used to attach
flexible metadata, such as a
[Tag][google.cloud.datacatalog.v1beta1.Tag].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([entry, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.UpdateEntryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if entry is not None:
request.entry = entry
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_entry,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("entry.name", request.entry.name),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
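    # A minimal usage sketch (not part of the generated client): the flattened
    # ``entry`` and ``update_mask`` keyword arguments documented above can be passed
    # instead of a prebuilt request object. The resource name and mask path below are
    # placeholders.
    #
    #   from google.protobuf import field_mask_pb2
    #
    #   entry = datacatalog_v1beta1.Entry(
    #       name="projects/my-project/locations/us-central1/entryGroups/my_group/entries/my_entry",
    #       description="Updated description",
    #   )
    #   mask = field_mask_pb2.FieldMask(paths=["description"])
    #   response = await client.update_entry(entry=entry, update_mask=mask)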
async def delete_entry(
self,
request: Optional[Union[datacatalog.DeleteEntryRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an existing entry. Only entries created through
[CreateEntry][google.cloud.datacatalog.v1beta1.DataCatalog.CreateEntry]
method can be deleted. Users should enable the Data Catalog API
in the project identified by the ``name`` parameter (see [Data
Catalog Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_delete_entry():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.DeleteEntryRequest(
name="name_value",
)
# Make the request
await client.delete_entry(request=request)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.DeleteEntryRequest, dict]]):
The request object. Request message for
[DeleteEntry][google.cloud.datacatalog.v1beta1.DataCatalog.DeleteEntry].
name (:class:`str`):
Required. The name of the entry. Example:
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.DeleteEntryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_entry,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def get_entry(
self,
request: Optional[Union[datacatalog.GetEntryRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datacatalog.Entry:
r"""Gets an entry.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_get_entry():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.GetEntryRequest(
name="name_value",
)
# Make the request
response = await client.get_entry(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.GetEntryRequest, dict]]):
The request object. Request message for
[GetEntry][google.cloud.datacatalog.v1beta1.DataCatalog.GetEntry].
name (:class:`str`):
Required. The name of the entry. Example:
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.Entry:
Entry Metadata.
A Data Catalog Entry resource represents another
resource in Google Cloud Platform (such as a BigQuery
dataset or a Pub/Sub topic), or outside of Google
Cloud Platform. Clients can use the linked_resource
field in the Entry resource to refer to the original
resource ID of the source system.
An Entry resource contains resource details, such as
its schema. An Entry can also be used to attach
flexible metadata, such as a
[Tag][google.cloud.datacatalog.v1beta1.Tag].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.GetEntryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_entry,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def lookup_entry(
self,
request: Optional[Union[datacatalog.LookupEntryRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datacatalog.Entry:
r"""Get an entry by target resource name. This method
allows clients to use the resource name from the source
Google Cloud Platform service to get the Data Catalog
Entry.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_lookup_entry():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.LookupEntryRequest(
linked_resource="linked_resource_value",
)
# Make the request
response = await client.lookup_entry(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.LookupEntryRequest, dict]]):
The request object. Request message for
[LookupEntry][google.cloud.datacatalog.v1beta1.DataCatalog.LookupEntry].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.Entry:
Entry Metadata.
A Data Catalog Entry resource represents another
resource in Google Cloud Platform (such as a BigQuery
dataset or a Pub/Sub topic), or outside of Google
Cloud Platform. Clients can use the linked_resource
field in the Entry resource to refer to the original
resource ID of the source system.
An Entry resource contains resource details, such as
its schema. An Entry can also be used to attach
flexible metadata, such as a
[Tag][google.cloud.datacatalog.v1beta1.Tag].
"""
# Create or coerce a protobuf request object.
request = datacatalog.LookupEntryRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.lookup_entry,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
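    # Illustrative sketch (not generated code): ``lookup_entry`` exposes no flattened
    # fields, so a request object is always built. The linked resource below is a
    # placeholder full resource name.
    #
    #   request = datacatalog_v1beta1.LookupEntryRequest(
    #       linked_resource="//bigquery.googleapis.com/projects/my-project/datasets/my_dataset/tables/my_table"
    #   )
    #   response = await client.lookup_entry(request=request)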
async def list_entries(
self,
request: Optional[Union[datacatalog.ListEntriesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEntriesAsyncPager:
r"""Lists entries.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_list_entries():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.ListEntriesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_entries(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.ListEntriesRequest, dict]]):
The request object. Request message for
[ListEntries][google.cloud.datacatalog.v1beta1.DataCatalog.ListEntries].
parent (:class:`str`):
Required. The name of the entry group that contains the
entries, which can be provided in URL format. Example:
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.services.data_catalog.pagers.ListEntriesAsyncPager:
Response message for
[ListEntries][google.cloud.datacatalog.v1beta1.DataCatalog.ListEntries].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.ListEntriesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_entries,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListEntriesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
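    # Illustrative sketch (not generated code): the flattened ``parent`` argument can
    # replace the request object, and the returned async pager is iterated directly.
    # The entry group name is a placeholder.
    #
    #   page_result = client.list_entries(
    #       parent="projects/my-project/locations/us-central1/entryGroups/my_group"
    #   )
    #   async for entry in page_result:
    #       print(entry.name)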
async def create_tag_template(
self,
request: Optional[Union[datacatalog.CreateTagTemplateRequest, dict]] = None,
*,
parent: Optional[str] = None,
tag_template_id: Optional[str] = None,
tag_template: Optional[tags.TagTemplate] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.TagTemplate:
r"""Creates a tag template. The user should enable the Data Catalog
API in the project identified by the ``parent`` parameter (see
`Data Catalog Resource
Project <https://cloud.google.com/data-catalog/docs/concepts/resource-project>`__
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_create_tag_template():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.CreateTagTemplateRequest(
parent="parent_value",
tag_template_id="tag_template_id_value",
)
# Make the request
response = await client.create_tag_template(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.CreateTagTemplateRequest, dict]]):
The request object. Request message for
[CreateTagTemplate][google.cloud.datacatalog.v1beta1.DataCatalog.CreateTagTemplate].
parent (:class:`str`):
Required. The name of the project and the template
location
                [region](https://cloud.google.com/data-catalog/docs/concepts/regions).
Example:
- projects/{project_id}/locations/us-central1
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag_template_id (:class:`str`):
Required. The id of the tag template
to create.
This corresponds to the ``tag_template_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag_template (:class:`google.cloud.datacatalog_v1beta1.types.TagTemplate`):
Required. The tag template to create.
This corresponds to the ``tag_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.TagTemplate:
A tag template defines a tag, which can have one or more typed fields.
The template is used to create and attach the tag to
GCP resources. [Tag template
roles](\ https://cloud.google.com/iam/docs/understanding-roles#data-catalog-roles)
provide permissions to create, edit, and use the
template. See, for example, the [TagTemplate
User](\ https://cloud.google.com/data-catalog/docs/how-to/template-user)
role, which includes permission to use the tag
template to tag resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, tag_template_id, tag_template])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.CreateTagTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tag_template_id is not None:
request.tag_template_id = tag_template_id
if tag_template is not None:
request.tag_template = tag_template
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_tag_template,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
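    # Illustrative sketch (not generated code): the template can also be supplied
    # through the flattened arguments. The project, location, template id, and field
    # definition below are placeholders.
    #
    #   source_field = datacatalog_v1beta1.TagTemplateField(
    #       display_name="Source",
    #       type_=datacatalog_v1beta1.FieldType(primitive_type="STRING"),
    #   )
    #   template = datacatalog_v1beta1.TagTemplate(
    #       display_name="Demo template",
    #       fields={"source": source_field},
    #   )
    #   response = await client.create_tag_template(
    #       parent="projects/my-project/locations/us-central1",
    #       tag_template_id="demo_template",
    #       tag_template=template,
    #   )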
async def get_tag_template(
self,
request: Optional[Union[datacatalog.GetTagTemplateRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.TagTemplate:
r"""Gets a tag template.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_get_tag_template():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.GetTagTemplateRequest(
name="name_value",
)
# Make the request
response = await client.get_tag_template(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.GetTagTemplateRequest, dict]]):
The request object. Request message for
[GetTagTemplate][google.cloud.datacatalog.v1beta1.DataCatalog.GetTagTemplate].
name (:class:`str`):
Required. The name of the tag template. Example:
- projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.TagTemplate:
A tag template defines a tag, which can have one or more typed fields.
The template is used to create and attach the tag to
GCP resources. [Tag template
roles](\ https://cloud.google.com/iam/docs/understanding-roles#data-catalog-roles)
provide permissions to create, edit, and use the
template. See, for example, the [TagTemplate
User](\ https://cloud.google.com/data-catalog/docs/how-to/template-user)
role, which includes permission to use the tag
template to tag resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.GetTagTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_tag_template,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_tag_template(
self,
request: Optional[Union[datacatalog.UpdateTagTemplateRequest, dict]] = None,
*,
tag_template: Optional[tags.TagTemplate] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.TagTemplate:
r"""Updates a tag template. This method cannot be used to update the
fields of a template. The tag template fields are represented as
separate resources and should be updated using their own
create/update/delete methods. Users should enable the Data
Catalog API in the project identified by the
``tag_template.name`` parameter (see [Data Catalog Resource
Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_update_tag_template():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.UpdateTagTemplateRequest(
)
# Make the request
response = await client.update_tag_template(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.UpdateTagTemplateRequest, dict]]):
The request object. Request message for
[UpdateTagTemplate][google.cloud.datacatalog.v1beta1.DataCatalog.UpdateTagTemplate].
tag_template (:class:`google.cloud.datacatalog_v1beta1.types.TagTemplate`):
Required. The template to update. The
"name" field must be set.
This corresponds to the ``tag_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The field mask specifies the parts of the template to
overwrite.
Allowed fields:
- ``display_name``
If absent or empty, all of the allowed fields above will
be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.TagTemplate:
A tag template defines a tag, which can have one or more typed fields.
The template is used to create and attach the tag to
GCP resources. [Tag template
roles](\ https://cloud.google.com/iam/docs/understanding-roles#data-catalog-roles)
provide permissions to create, edit, and use the
template. See, for example, the [TagTemplate
User](\ https://cloud.google.com/data-catalog/docs/how-to/template-user)
role, which includes permission to use the tag
template to tag resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tag_template, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.UpdateTagTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tag_template is not None:
request.tag_template = tag_template
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_tag_template,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tag_template.name", request.tag_template.name),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
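    # Illustrative sketch (not generated code): per the docstring above, only
    # ``display_name`` may be masked here. The template name is a placeholder.
    #
    #   from google.protobuf import field_mask_pb2
    #
    #   template = datacatalog_v1beta1.TagTemplate(
    #       name="projects/my-project/locations/us-central1/tagTemplates/demo_template",
    #       display_name="Renamed template",
    #   )
    #   response = await client.update_tag_template(
    #       tag_template=template,
    #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    #   )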
async def delete_tag_template(
self,
request: Optional[Union[datacatalog.DeleteTagTemplateRequest, dict]] = None,
*,
name: Optional[str] = None,
force: Optional[bool] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a tag template and all tags using the template. Users
should enable the Data Catalog API in the project identified by
the ``name`` parameter (see [Data Catalog Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_delete_tag_template():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.DeleteTagTemplateRequest(
name="name_value",
force=True,
)
# Make the request
await client.delete_tag_template(request=request)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.DeleteTagTemplateRequest, dict]]):
The request object. Request message for
[DeleteTagTemplate][google.cloud.datacatalog.v1beta1.DataCatalog.DeleteTagTemplate].
name (:class:`str`):
Required. The name of the tag template to delete.
Example:
- projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
force (:class:`bool`):
Required. Currently, this field must always be set to
``true``. This confirms the deletion of any possible
tags using this template. ``force = false`` will be
supported in the future.
This corresponds to the ``force`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, force])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.DeleteTagTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if force is not None:
request.force = force
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_tag_template,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
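    # Illustrative sketch (not generated code): per the docstring above, ``force``
    # currently must be True. The template name is a placeholder.
    #
    #   await client.delete_tag_template(
    #       name="projects/my-project/locations/us-central1/tagTemplates/demo_template",
    #       force=True,
    #   )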
async def create_tag_template_field(
self,
request: Optional[
Union[datacatalog.CreateTagTemplateFieldRequest, dict]
] = None,
*,
parent: Optional[str] = None,
tag_template_field_id: Optional[str] = None,
tag_template_field: Optional[tags.TagTemplateField] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.TagTemplateField:
r"""Creates a field in a tag template. The user should enable the
Data Catalog API in the project identified by the ``parent``
parameter (see `Data Catalog Resource
Project <https://cloud.google.com/data-catalog/docs/concepts/resource-project>`__
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_create_tag_template_field():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
tag_template_field = datacatalog_v1beta1.TagTemplateField()
tag_template_field.type_.primitive_type = "TIMESTAMP"
request = datacatalog_v1beta1.CreateTagTemplateFieldRequest(
parent="parent_value",
tag_template_field_id="tag_template_field_id_value",
tag_template_field=tag_template_field,
)
# Make the request
response = await client.create_tag_template_field(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.CreateTagTemplateFieldRequest, dict]]):
The request object. Request message for
[CreateTagTemplateField][google.cloud.datacatalog.v1beta1.DataCatalog.CreateTagTemplateField].
parent (:class:`str`):
Required. The name of the project and the template
location
`region <https://cloud.google.com/data-catalog/docs/concepts/regions>`__.
Example:
- projects/{project_id}/locations/us-central1/tagTemplates/{tag_template_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag_template_field_id (:class:`str`):
Required. The ID of the tag template field to create.
                Field IDs can contain letters (both uppercase and
lowercase), numbers (0-9), underscores (_) and dashes
(-). Field IDs must be at least 1 character long and at
most 128 characters long. Field IDs must also be unique
within their template.
This corresponds to the ``tag_template_field_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag_template_field (:class:`google.cloud.datacatalog_v1beta1.types.TagTemplateField`):
Required. The tag template field to
create.
This corresponds to the ``tag_template_field`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.TagTemplateField:
The template for an individual field
within a tag template.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, tag_template_field_id, tag_template_field])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.CreateTagTemplateFieldRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tag_template_field_id is not None:
request.tag_template_field_id = tag_template_field_id
if tag_template_field is not None:
request.tag_template_field = tag_template_field
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_tag_template_field,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_tag_template_field(
self,
request: Optional[
Union[datacatalog.UpdateTagTemplateFieldRequest, dict]
] = None,
*,
name: Optional[str] = None,
tag_template_field: Optional[tags.TagTemplateField] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.TagTemplateField:
r"""Updates a field in a tag template. This method cannot be used to
update the field type. Users should enable the Data Catalog API
in the project identified by the ``name`` parameter (see [Data
Catalog Resource Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_update_tag_template_field():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
tag_template_field = datacatalog_v1beta1.TagTemplateField()
tag_template_field.type_.primitive_type = "TIMESTAMP"
request = datacatalog_v1beta1.UpdateTagTemplateFieldRequest(
name="name_value",
tag_template_field=tag_template_field,
)
# Make the request
response = await client.update_tag_template_field(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.UpdateTagTemplateFieldRequest, dict]]):
The request object. Request message for
[UpdateTagTemplateField][google.cloud.datacatalog.v1beta1.DataCatalog.UpdateTagTemplateField].
name (:class:`str`):
Required. The name of the tag template field. Example:
- projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{tag_template_field_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag_template_field (:class:`google.cloud.datacatalog_v1beta1.types.TagTemplateField`):
Required. The template to update.
This corresponds to the ``tag_template_field`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Optional. The field mask specifies the parts of the
template to be updated. Allowed fields:
- ``display_name``
- ``type.enum_type``
- ``is_required``
If ``update_mask`` is not set or empty, all of the
allowed fields above will be updated.
When updating an enum type, the provided values will be
merged with the existing values. Therefore, enum values
                can only be added; existing enum values cannot be
                deleted or renamed. Updating a template field from
optional to required is NOT allowed.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.TagTemplateField:
The template for an individual field
within a tag template.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, tag_template_field, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.UpdateTagTemplateFieldRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if tag_template_field is not None:
request.tag_template_field = tag_template_field
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_tag_template_field,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
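    # Illustrative sketch (not generated code): only the allowed fields listed above
    # may be masked; here just ``display_name`` is updated. The field name is a
    # placeholder.
    #
    #   from google.protobuf import field_mask_pb2
    #
    #   field = datacatalog_v1beta1.TagTemplateField(display_name="Data source")
    #   response = await client.update_tag_template_field(
    #       name="projects/my-project/locations/us-central1/tagTemplates/demo_template/fields/source",
    #       tag_template_field=field,
    #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    #   )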
async def rename_tag_template_field(
self,
request: Optional[
Union[datacatalog.RenameTagTemplateFieldRequest, dict]
] = None,
*,
name: Optional[str] = None,
new_tag_template_field_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.TagTemplateField:
r"""Renames a field in a tag template. The user should enable the
Data Catalog API in the project identified by the ``name``
parameter (see `Data Catalog Resource
Project <https://cloud.google.com/data-catalog/docs/concepts/resource-project>`__
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_rename_tag_template_field():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.RenameTagTemplateFieldRequest(
name="name_value",
new_tag_template_field_id="new_tag_template_field_id_value",
)
# Make the request
response = await client.rename_tag_template_field(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.RenameTagTemplateFieldRequest, dict]]):
The request object. Request message for
[RenameTagTemplateField][google.cloud.datacatalog.v1beta1.DataCatalog.RenameTagTemplateField].
name (:class:`str`):
Required. The name of the tag template. Example:
- projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{tag_template_field_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
new_tag_template_field_id (:class:`str`):
Required. The new ID of this tag template field. For
example, ``my_new_field``.
This corresponds to the ``new_tag_template_field_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.TagTemplateField:
The template for an individual field
within a tag template.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, new_tag_template_field_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.RenameTagTemplateFieldRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if new_tag_template_field_id is not None:
request.new_tag_template_field_id = new_tag_template_field_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.rename_tag_template_field,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_tag_template_field(
self,
request: Optional[
Union[datacatalog.DeleteTagTemplateFieldRequest, dict]
] = None,
*,
name: Optional[str] = None,
force: Optional[bool] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a field in a tag template and all uses of that field.
Users should enable the Data Catalog API in the project
identified by the ``name`` parameter (see [Data Catalog Resource
Project]
(https://cloud.google.com/data-catalog/docs/concepts/resource-project)
for more information).
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_delete_tag_template_field():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.DeleteTagTemplateFieldRequest(
name="name_value",
force=True,
)
# Make the request
await client.delete_tag_template_field(request=request)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.DeleteTagTemplateFieldRequest, dict]]):
The request object. Request message for
[DeleteTagTemplateField][google.cloud.datacatalog.v1beta1.DataCatalog.DeleteTagTemplateField].
name (:class:`str`):
Required. The name of the tag template field to delete.
Example:
- projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{tag_template_field_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
force (:class:`bool`):
Required. Currently, this field must always be set to
``true``. This confirms the deletion of this field from
any tags using this field. ``force = false`` will be
supported in the future.
This corresponds to the ``force`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, force])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.DeleteTagTemplateFieldRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if force is not None:
request.force = force
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_tag_template_field,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def create_tag(
self,
request: Optional[Union[datacatalog.CreateTagRequest, dict]] = None,
*,
parent: Optional[str] = None,
tag: Optional[tags.Tag] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.Tag:
r"""Creates a tag on an
[Entry][google.cloud.datacatalog.v1beta1.Entry]. Note: The
project identified by the ``parent`` parameter for the
`tag <https://cloud.google.com/data-catalog/docs/reference/rest/v1beta1/projects.locations.entryGroups.entries.tags/create#path-parameters>`__
and the `tag
template <https://cloud.google.com/data-catalog/docs/reference/rest/v1beta1/projects.locations.tagTemplates/create#path-parameters>`__
used to create the tag must be from the same organization.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_create_tag():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
tag = datacatalog_v1beta1.Tag()
tag.column = "column_value"
tag.template = "template_value"
request = datacatalog_v1beta1.CreateTagRequest(
parent="parent_value",
tag=tag,
)
# Make the request
response = await client.create_tag(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.CreateTagRequest, dict]]):
The request object. Request message for
[CreateTag][google.cloud.datacatalog.v1beta1.DataCatalog.CreateTag].
parent (:class:`str`):
Required. The name of the resource to attach this tag
to. Tags can be attached to Entries. Example:
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
                Note that this Tag and its child resources may not
                actually be stored in the location specified in its name.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag (:class:`google.cloud.datacatalog_v1beta1.types.Tag`):
Required. The tag to create.
This corresponds to the ``tag`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.Tag:
Tags are used to attach custom metadata to Data Catalog resources. Tags
conform to the specifications within their tag
template.
See [Data Catalog
IAM](\ https://cloud.google.com/data-catalog/docs/concepts/iam)
for information on the permissions needed to create
or view tags.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, tag])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.CreateTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tag is not None:
request.tag = tag
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_tag,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
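    # Illustrative sketch (not generated code): a tag can be attached with the
    # flattened arguments. The entry name, template name, and field id below are
    # placeholders.
    #
    #   tag = datacatalog_v1beta1.Tag(
    #       template="projects/my-project/locations/us-central1/tagTemplates/demo_template",
    #       fields={"source": datacatalog_v1beta1.TagField(string_value="BigQuery")},
    #   )
    #   response = await client.create_tag(
    #       parent="projects/my-project/locations/us-central1/entryGroups/my_group/entries/my_entry",
    #       tag=tag,
    #   )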
async def update_tag(
self,
request: Optional[Union[datacatalog.UpdateTagRequest, dict]] = None,
*,
tag: Optional[tags.Tag] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tags.Tag:
r"""Updates an existing tag.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_update_tag():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
tag = datacatalog_v1beta1.Tag()
tag.column = "column_value"
tag.template = "template_value"
request = datacatalog_v1beta1.UpdateTagRequest(
tag=tag,
)
# Make the request
response = await client.update_tag(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.UpdateTagRequest, dict]]):
The request object. Request message for
[UpdateTag][google.cloud.datacatalog.v1beta1.DataCatalog.UpdateTag].
tag (:class:`google.cloud.datacatalog_v1beta1.types.Tag`):
Required. The updated tag. The "name"
field must be set.
This corresponds to the ``tag`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The fields to update on the Tag. If absent or empty, all
modifiable fields are updated. Currently the only
modifiable field is the field ``fields``.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.types.Tag:
Tags are used to attach custom metadata to Data Catalog resources. Tags
conform to the specifications within their tag
template.
See [Data Catalog
IAM](\ https://cloud.google.com/data-catalog/docs/concepts/iam)
for information on the permissions needed to create
or view tags.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tag, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.UpdateTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tag is not None:
request.tag = tag
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_tag,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("tag.name", request.tag.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
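    # Illustrative sketch (not generated code): per the docstring above, ``fields`` is
    # currently the only modifiable part of a Tag. The tag name and field id below are
    # placeholders.
    #
    #   from google.protobuf import field_mask_pb2
    #
    #   tag = datacatalog_v1beta1.Tag(
    #       name="projects/my-project/locations/us-central1/entryGroups/my_group/entries/my_entry/tags/my_tag",
    #       fields={"source": datacatalog_v1beta1.TagField(string_value="Cloud Storage")},
    #   )
    #   response = await client.update_tag(
    #       tag=tag,
    #       update_mask=field_mask_pb2.FieldMask(paths=["fields"]),
    #   )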
async def delete_tag(
self,
request: Optional[Union[datacatalog.DeleteTagRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a tag.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_delete_tag():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.DeleteTagRequest(
name="name_value",
)
# Make the request
await client.delete_tag(request=request)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.DeleteTagRequest, dict]]):
The request object. Request message for
[DeleteTag][google.cloud.datacatalog.v1beta1.DataCatalog.DeleteTag].
name (:class:`str`):
Required. The name of the tag to delete. Example:
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}/tags/{tag_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.DeleteTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_tag,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
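        # Note: the defaults above retry transient DeadlineExceeded and
        # ServiceUnavailable errors with exponential backoff (0.1s initial,
        # x1.3 multiplier, capped at 60s) unless the caller overrides
        # ``retry``/``timeout``.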
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def list_tags(
self,
request: Optional[Union[datacatalog.ListTagsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTagsAsyncPager:
r"""Lists the tags on an
[Entry][google.cloud.datacatalog.v1beta1.Entry].
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
async def sample_list_tags():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.ListTagsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tags(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.datacatalog_v1beta1.types.ListTagsRequest, dict]]):
The request object. Request message for
[ListTags][google.cloud.datacatalog.v1beta1.DataCatalog.ListTags].
parent (:class:`str`):
Required. The name of the Data Catalog resource to list
the tags of. The resource could be an
[Entry][google.cloud.datacatalog.v1beta1.Entry] or an
[EntryGroup][google.cloud.datacatalog.v1beta1.EntryGroup].
Examples:
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
- projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datacatalog_v1beta1.services.data_catalog.pagers.ListTagsAsyncPager:
Response message for
[ListTags][google.cloud.datacatalog.v1beta1.DataCatalog.ListTags].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datacatalog.ListTagsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_tags,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListTagsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def set_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
resource: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy for a resource. Replaces any
existing policy. Supported resources are:
- Tag templates.
- Entries.
- Entry groups. Note, this method cannot be used to manage
policies for BigQuery, Pub/Sub and any external Google Cloud
Platform resources synced to Data Catalog.
        Callers must have the following Google IAM permissions:
- ``datacatalog.tagTemplates.setIamPolicy`` to set policies on
tag templates.
- ``datacatalog.entries.setIamPolicy`` to set policies on
entries.
- ``datacatalog.entryGroups.setIamPolicy`` to set policies on
entry groups.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
from google.iam.v1 import iam_policy_pb2 # type: ignore
async def sample_set_iam_policy():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = iam_policy_pb2.SetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = await client.set_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
The request object. Request message for `SetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being specified. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
one or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role
is a named list of permissions; each role can be an
IAM predefined role or a user-created custom role.
For some types of Google Cloud resources, a binding
can also specify a condition, which is a logical
expression that allows access to a resource only if
the expression evaluates to true. A condition can add
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
                **JSON example:**
                {
                  "bindings": [
                    {
                      "role": "roles/resourcemanager.organizationAdmin",
                      "members": [
                        "user:mike@example.com",
                        "group:admins@example.com",
                        "domain:google.com",
                        "serviceAccount:my-project-id@appspot.gserviceaccount.com"
                      ]
                    },
                    {
                      "role": "roles/resourcemanager.organizationViewer",
                      "members": ["user:eve@example.com"],
                      "condition": {
                        "title": "expirable access",
                        "description": "Does not grant access after Sep 2020",
                        "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
                      }
                    }
                  ],
                  "etag": "BwWWja0YfJA=",
                  "version": 3
                }
                **YAML example:**
                bindings:
                - members:
                  - user:mike@example.com
                  - group:admins@example.com
                  - domain:google.com
                  - serviceAccount:my-project-id@appspot.gserviceaccount.com
                  role: roles/resourcemanager.organizationAdmin
                - members:
                  - user:eve@example.com
                  role: roles/resourcemanager.organizationViewer
                  condition:
                    title: expirable access
                    description: Does not grant access after Sep 2020
                    expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
                etag: BwWWja0YfJA=
                version: 3
For a description of IAM and its features, see the
[IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.SetIamPolicyRequest(
resource=resource,
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.set_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
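    # Illustrative sketch (not generated sample code): a request carrying an
    # explicit policy with a single binding, mirroring the JSON example in the
    # docstring above; the resource, role and member values are placeholders.
    #
    #     from google.iam.v1 import iam_policy_pb2, policy_pb2
    #     policy = policy_pb2.Policy(bindings=[policy_pb2.Binding(
    #         role="roles/resourcemanager.organizationViewer",
    #         members=["user:eve@example.com"])])
    #     request = iam_policy_pb2.SetIamPolicyRequest(
    #         resource="resource_value", policy=policy)
    #     response = await client.set_iam_policy(request=request)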
async def get_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
resource: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a resource. A ``NOT_FOUND``
error is returned if the resource does not exist. An empty
policy is returned if the resource exists but does not have a
policy set on it.
Supported resources are:
- Tag templates.
- Entries.
- Entry groups. Note, this method cannot be used to manage
policies for BigQuery, Pub/Sub and any external Google Cloud
Platform resources synced to Data Catalog.
        Callers must have the following Google IAM permissions:
- ``datacatalog.tagTemplates.getIamPolicy`` to get policies on
tag templates.
- ``datacatalog.entries.getIamPolicy`` to get policies on
entries.
- ``datacatalog.entryGroups.getIamPolicy`` to get policies on
entry groups.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
from google.iam.v1 import iam_policy_pb2 # type: ignore
async def sample_get_iam_policy():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = iam_policy_pb2.GetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = await client.get_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
The request object. Request message for `GetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being requested. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
one or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role
is a named list of permissions; each role can be an
IAM predefined role or a user-created custom role.
For some types of Google Cloud resources, a binding
can also specify a condition, which is a logical
expression that allows access to a resource only if
the expression evaluates to true. A condition can add
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
                **JSON example:**
                {
                  "bindings": [
                    {
                      "role": "roles/resourcemanager.organizationAdmin",
                      "members": [
                        "user:mike@example.com",
                        "group:admins@example.com",
                        "domain:google.com",
                        "serviceAccount:my-project-id@appspot.gserviceaccount.com"
                      ]
                    },
                    {
                      "role": "roles/resourcemanager.organizationViewer",
                      "members": ["user:eve@example.com"],
                      "condition": {
                        "title": "expirable access",
                        "description": "Does not grant access after Sep 2020",
                        "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
                      }
                    }
                  ],
                  "etag": "BwWWja0YfJA=",
                  "version": 3
                }
                **YAML example:**
                bindings:
                - members:
                  - user:mike@example.com
                  - group:admins@example.com
                  - domain:google.com
                  - serviceAccount:my-project-id@appspot.gserviceaccount.com
                  role: roles/resourcemanager.organizationAdmin
                - members:
                  - user:eve@example.com
                  role: roles/resourcemanager.organizationViewer
                  condition:
                    title: expirable access
                    description: Does not grant access after Sep 2020
                    expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
                etag: BwWWja0YfJA=
                version: 3
For a description of IAM and its features, see the
[IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.GetIamPolicyRequest(
resource=resource,
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def test_iam_permissions(
self,
request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns the caller's permissions on a resource. If the resource
does not exist, an empty set of permissions is returned (We
don't return a ``NOT_FOUND`` error).
Supported resources are:
- Tag templates.
- Entries.
- Entry groups. Note, this method cannot be used to manage
policies for BigQuery, Pub/Sub and any external Google Cloud
Platform resources synced to Data Catalog.
A caller is not required to have Google IAM permission to make
this request.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datacatalog_v1beta1
from google.iam.v1 import iam_policy_pb2 # type: ignore
async def sample_test_iam_permissions():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = iam_policy_pb2.TestIamPermissionsRequest(
resource="resource_value",
permissions=['permissions_value1', 'permissions_value2'],
)
# Make the request
response = await client.test_iam_permissions(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
The request object. Request message for
`TestIamPermissions` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.test_iam_permissions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
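# Illustrative sketch (not part of the generated samples): because the client
# defines ``__aenter__``/``__aexit__`` above, it can be used as an async context
# manager so the underlying transport is closed automatically; the names below
# mirror the docstring samples.
#
#     async def main():
#         async with DataCatalogAsyncClient() as client:
#             request = datacatalog_v1beta1.ListTagsRequest(parent="parent_value")
#             async for tag in await client.list_tags(request=request):
#                 print(tag)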
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-datacatalog",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DataCatalogAsyncClient",)
| apache-2.0 |
caporaso-lab/tax-credit | tax_credit/tests/test_eval_framework.py | 4 | 80359 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, tax-credit development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import json
import numpy as np
import pandas as pd
from io import StringIO
from os.path import join
from tempfile import mkdtemp
from shutil import rmtree
from biom import Table
from sklearn.metrics import precision_recall_fscore_support
from tax_credit.eval_framework import (compute_taxon_accuracy,
filter_table,
get_sample_to_top_params,
parameter_comparisons,
per_sequence_precision)
class EvalFrameworkTests(TestCase):
def test_get_sample_to_top_params(self):
actual = get_sample_to_top_params(self.mock_result_table1, "F-measure")
self.assertEqual(actual['rdp'][('B1', 'm1')], ['0.2'])
self.assertEqual(actual['rdp'][('F2', 'm2')], ['0.1'])
self.assertEqual(actual['rdp'][('F2', 'm3')], ['0', '0.1'])
self.assertEqual(actual['uclust'][('B1', 'm1')], ['0.51:0.8:3'])
self.assertEqual(actual['uclust'][('F2', 'm2')], ['0.51:0.8:3'])
self.assertEqual(actual['uclust'][('F2', 'm3')], ['0.51:0.9:3'])
self.assertEqual(actual.shape, (3, 2))
def test_parameter_comparisons(self):
actual = parameter_comparisons(self.mock_result_table1, "rdp")
self.assertEqual(actual['F-measure']['0.1'], 2)
self.assertEqual(actual['F-measure']['0.2'], 1)
self.assertEqual(actual['F-measure']['0'], 1)
self.assertEqual(actual['F-measure']['0.3'], 0)
self.assertEqual(actual['Precision']['0.1'], 2)
self.assertEqual(actual['Recall']['0.1'], 3)
self.assertEqual(actual.shape, (6, 5))
actual = parameter_comparisons(self.mock_result_table1, "uclust")
self.assertEqual(actual['F-measure']['0.51:0.8:3'], 2)
self.assertEqual(actual['F-measure']['0.51:0.9:3'], 1)
self.assertEqual(actual.shape, (2, 5))
def test_filter_table(self):
# prior to filtering there are observations with count less than 10
self.assertTrue(np.array(
[e.sum() < 10 for e in self.table3.iter_data(
axis='observation')]).any())
filtered_table = filter_table(
self.table3, min_count=10, taxonomy_level=0)
# after filtering there are no observations with count less than 10
self.assertFalse(np.array(
[e.sum() < 10 for e in filtered_table.iter_data(
axis='observation')]).any())
# but some observations are still present
self.assertTrue(filtered_table.shape[0] > 0)
self.assertTrue(np.array(
[e.sum() < 100 for e in self.table3.iter_data(
axis='observation')]).any())
filtered_table = filter_table(
self.table3, min_count=100, taxonomy_level=0)
self.assertFalse(np.array(
[e.sum() < 100 for e in filtered_table.iter_data(
axis='observation')]).any())
# but some observations are still present
self.assertTrue(filtered_table.shape[0] > 0)
# prior to filtering, there are taxonomies with fewer than 4 levels
md_levels = [len(md['taxonomy']) < 4
for _, _, md in self.table3.iter(axis='observation')]
self.assertTrue(np.array(md_levels).any())
filtered_table = filter_table(
self.table3, min_count=0, taxonomy_level=4)
# after filtering, there are no taxonomies with fewer than 4 levels
md_levels = [len(md['taxonomy']) < 4
for _, _, md in filtered_table.iter(axis='observation')]
self.assertFalse(np.array(md_levels).any())
# but some observations are still present
self.assertTrue(filtered_table.shape[0] > 0)
md_levels = [len(md['taxonomy']) < 5
for _, _, md in self.table3.iter(axis='observation')]
self.assertTrue(np.array(md_levels).any())
filtered_table = filter_table(
self.table3, min_count=0, taxonomy_level=5)
md_levels = [len(md['taxonomy']) < 5
for _, _, md in filtered_table.iter(axis='observation')]
self.assertFalse(np.array(md_levels).any())
# but some observations are still present
self.assertTrue(filtered_table.shape[0] > 0)
md_levels = [len(md['taxonomy']) < 6
for _, _, md in self.table3.iter(axis='observation')]
self.assertTrue(np.array(md_levels).any())
filtered_table = filter_table(
self.table3, min_count=0, taxonomy_level=6)
md_levels = [len(md['taxonomy']) < 6
for _, _, md in filtered_table.iter(axis='observation')]
self.assertFalse(np.array(md_levels).any())
# but some observations are still present
self.assertTrue(filtered_table.shape[0] > 0)
def test_filter_table_taxa(self):
""" taxa-based filtering works as expected """
taxa_to_keep = ["k__Bacteria", "p__Firmicutes", "c__Bacilli"]
filtered_table = filter_table(self.table3, taxa_to_keep=taxa_to_keep)
# expected value determined with grep -c c__Bacilli
self.assertEqual(filtered_table.shape[0], 53)
taxa_to_keep = ["k__Bacteria", "p__Firmicutes", "c__Bacilli",
"o__Bacillales", "f__Staphylococcaceae",
"g__Staphylococcus"]
filtered_table = filter_table(self.table3, taxa_to_keep=taxa_to_keep)
# expected value determined with grep -c g__Staphylococcus
self.assertEqual(filtered_table.shape[0], 8)
taxa_to_keep = ["k__Bacteria"]
filtered_table = filter_table(self.table3, taxa_to_keep=taxa_to_keep)
# all observations are retained
self.assertEqual(filtered_table.shape[0], self.table3.shape[0])
taxa_to_keep = ["k__Archaea"]
filtered_table = filter_table(self.table3, taxa_to_keep=taxa_to_keep)
# no observations are retained
self.assertEqual(filtered_table.shape[0], 0)
def test_compute_taxon_accuracy_default(self):
""" p, r and f compute correctly when default to first sample ids"""
# default of comparing first sample in each table
actual = compute_taxon_accuracy(self.table1, self.table2)
expected = (2./3., 1.0)
self.assertAlmostEqual(actual, expected)
# default of comparing first sample in each table
actual = compute_taxon_accuracy(self.table2, self.table1)
expected = (1.0, 2./3.)
self.assertAlmostEqual(actual, expected)
def test_compute_taxon_accuracy_alt_sample_ids(self):
""" p, r and f compute correctly when using alternative sample ids"""
# alt sample in table 1
actual = compute_taxon_accuracy(
self.table1, self.table2, actual_sample_id='s2')
expected = (1.0, 1.0)
self.assertEqual(actual, expected)
# alt sample in table 2
actual = compute_taxon_accuracy(
self.table1, self.table2, expected_sample_id='s4')
expected = (1./3., 1.0)
self.assertAlmostEqual(actual, expected)
# alt sample in tables 1 & 2
actual = compute_taxon_accuracy(
self.table1, self.table2, actual_sample_id='s2',
expected_sample_id='s4')
expected = (0.5, 1.0)
self.assertAlmostEqual(actual, expected)
def test_per_sequence_precision(self):
""" p, r, and f on individual expected sequences."""
exp_taxa = join(self.tmpdir, 'trueish-taxonomies.tsv')
obs_taxa = join(self.tmpdir, 'taxonomy.tsv')
exp_list = []
obs_list = []
weights = []
with open(exp_taxa) as exp_in:
for line in exp_in:
sid, taxon = line.split()
if not self.table1.exists(sid, axis='observation'):
continue
exp_list.append(taxon)
with open(obs_taxa) as obs_in:
for oline in obs_in:
osid, otaxon = oline.split()
if osid == sid:
obs_list.append(otaxon)
break
weights.append(self.table1.get_value_by_ids(sid, 's1'))
for i in range(7):
p, r, f = per_sequence_precision(
exp_taxa, obs_taxa, self.table1, 's1', i)
elist = [';'.join(e.split(';')[:i+1]) for e in exp_list]
olist = [';'.join(o.split(';')[:i+1]) for o in obs_list]
ep, er, ef, _ = precision_recall_fscore_support(
elist, olist, sample_weight=weights, average='micro')
self.assertAlmostEqual(p, ep)
self.assertAlmostEqual(r, er)
self.assertAlmostEqual(f, ef)
def test_per_sequence_precision_exclude(self):
""" p, r, and f on individual expected sequences, excluding specific
taxa.
"""
exp_taxa = join(self.tmpdir, 'trueish-taxonomies.tsv')
obs_taxa = join(self.tmpdir, 'taxonomy.tsv')
etable = self.table1.filter(['o1'], axis='observation', invert=True,
inplace=False)
for i in range(7):
p, r, f = per_sequence_precision(
exp_taxa, obs_taxa, self.table1, 's1', i, exclude=['other'])
ep, er, ef = per_sequence_precision(
exp_taxa, obs_taxa, etable, 's1', i)
self.assertAlmostEqual(p, ep)
self.assertAlmostEqual(r, er)
self.assertAlmostEqual(f, ef)
@classmethod
def setUpClass(self):
_table1 = """{"id": "None",
"format": "Biological Observation Matrix 1.0.0",
"format_url": "http:\/\/biom-format.org",
"type": "OTU table",
"generated_by": "greg",
"date": "2013-08-22T13:10:23.907145",
"matrix_type": "sparse",
"matrix_element_type": "float",
"shape": [
3,
4
],
"data": [
[
0,
0,
1
],
[
0,
1,
2
],
[
0,
2,
3
],
[
0,
3,
4
],
[
1,
0,
2
],
[
1,
1,
0
],
[
1,
2,
7
],
[
1,
3,
8
],
[
2,
0,
9
],
[
2,
1,
10
],
[
2,
2,
11
],
[
2,
3,
12
]
],
"rows": [
{
"id": "o1",
"metadata": {
"domain": "Archaea"
}
},
{
"id": "o2",
"metadata": {
"domain": "Bacteria"
}
},
{
"id": "o3",
"metadata": {
"domain": "Bacteria"
}
}
],
"columns": [
{
"id": "s1",
"metadata": {
"country": "Peru",
"pH": 4.2
}
},
{
"id": "s2",
"metadata": {
"country": "Peru",
"pH": 5.2
}
},
{
"id": "s3",
"metadata": {
"country": "Peru",
"pH": 5
}
},
{
"id": "s4",
"metadata": {
"country": "Peru",
"pH": 4.9
}
}
]
}"""
# table 1
# OTU ID s1 s2 s3 s4
# o1 1.0 2.0 3.0 4.0
# o2 2.0 0.0 7.0 8.0
# o3 9.0 10.0 11.0 12.0
_table2 = """{"id": "None",
"format": "Biological Observation Matrix 1.0.0",
"format_url": "http:\/\/biom-format.org",
"type": "OTU table",
"generated_by": "greg",
"date": "2013-08-22T13:19:35.281188",
"matrix_type": "sparse",
"matrix_element_type": "float",
"shape": [
2,
4
],
"data": [
[
0,
0,
1
],
[
0,
1,
2
],
[
0,
2,
3
],
[
0,
3,
0.001
],
[
1,
0,
9
],
[
1,
1,
10
],
[
1,
2,
11
],
[
1,
3,
0
]
],
"rows": [
{
"id": "o1",
"metadata": {
"domain": "Archaea"
}
},
{
"id": "o3",
"metadata": {
"domain": "Bacteria"
}
}
],
"columns": [
{
"id": "s1",
"metadata": {
"country": "Peru",
"pH": 4.2
}
},
{
"id": "s2",
"metadata": {
"country": "Peru",
"pH": 5.2
}
},
{
"id": "s3",
"metadata": {
"country": "Peru",
"pH": 5
}
},
{
"id": "s4",
"metadata": {
"country": "Peru",
"pH": 4.9
}
}
]
}"""
# table 2
# OTU ID s1 s2 s3 s4
# o1 1.0 2.0 3.0 0.001
# o3 9.0 10.0 11.0 0.0
_table3 = """{"id": "None",
"format": "Biological Observation Matrix 1.0.0",
"format_url": "http:\/\/biom-format.org",
"type": "OTU table",
"generated_by": "BIOM-Format 1.1.2",
"date": "2013-06-13T09:41:43.709874",
"matrix_type": "sparse",
"matrix_element_type": "float",
"shape": [
70,
4
],
"data": [
[
0,
0,
1
],
[
0,
1,
1
],
[
1,
0,
1
],
[
1,
1,
2
],
[
1,
2,
2
],
[
1,
3,
1
],
[
2,
0,
22
],
[
2,
1,
44
],
[
2,
2,
19
],
[
2,
3,
26
],
[
3,
0,
937
],
[
3,
1,
1815
],
[
3,
2,
923
],
[
3,
3,
775
],
[
4,
0,
1
],
[
4,
1,
1
],
[
4,
2,
3
],
[
4,
3,
1
],
[
5,
0,
130
],
[
5,
1,
229
],
[
5,
2,
122
],
[
5,
3,
69
],
[
6,
2,
1
],
[
6,
3,
2
],
[
7,
0,
52
],
[
7,
1,
80
],
[
7,
2,
5
],
[
7,
3,
2
],
[
8,
1,
2
],
[
9,
0,
3
],
[
9,
1,
7
],
[
9,
2,
4
],
[
9,
3,
2
],
[
10,
1,
1
],
[
10,
3,
1
],
[
11,
0,
6
],
[
11,
1,
9
],
[
11,
2,
4
],
[
11,
3,
5
],
[
12,
1,
1
],
[
12,
2,
1
],
[
12,
3,
2
],
[
13,
0,
1
],
[
13,
2,
1
],
[
14,
1,
2
],
[
15,
0,
1
],
[
15,
3,
3
],
[
16,
3,
2
],
[
17,
1,
4
],
[
18,
0,
1
],
[
18,
3,
1
],
[
19,
0,
1
],
[
19,
1,
1
],
[
19,
3,
1
],
[
20,
0,
5
],
[
20,
1,
13
],
[
21,
0,
2
],
[
21,
1,
3
],
[
21,
2,
2
],
[
21,
3,
1
],
[
22,
0,
1
],
[
22,
1,
2
],
[
23,
0,
2
],
[
23,
1,
2
],
[
23,
2,
2
],
[
23,
3,
1
],
[
24,
0,
1
],
[
24,
1,
1
],
[
25,
1,
2
],
[
25,
3,
1
],
[
26,
0,
17
],
[
26,
1,
18
],
[
26,
2,
69
],
[
26,
3,
64
],
[
27,
1,
1
],
[
27,
3,
2
],
[
28,
0,
20
],
[
28,
1,
29
],
[
28,
2,
133
],
[
28,
3,
104
],
[
29,
0,
2
],
[
29,
1,
5
],
[
29,
2,
2
],
[
29,
3,
3
],
[
30,
0,
31
],
[
30,
1,
48
],
[
30,
2,
10
],
[
30,
3,
15
],
[
31,
0,
1
],
[
31,
1,
2
],
[
31,
2,
15
],
[
31,
3,
12
],
[
32,
0,
1
],
[
32,
1,
1
],
[
33,
0,
94
],
[
33,
1,
150
],
[
33,
2,
63
],
[
33,
3,
39
],
[
34,
1,
1
],
[
34,
2,
1
],
[
35,
1,
4
],
[
36,
0,
1
],
[
36,
1,
1
],
[
37,
1,
1
],
[
37,
3,
1
],
[
38,
0,
1
],
[
38,
1,
1
],
[
39,
0,
22
],
[
39,
1,
44
],
[
39,
2,
1
],
[
40,
0,
4
],
[
40,
1,
7
],
[
41,
0,
1
],
[
41,
1,
2
],
[
41,
2,
3
],
[
42,
0,
198
],
[
42,
1,
374
],
[
42,
2,
181
],
[
42,
3,
167
],
[
43,
0,
192
],
[
43,
1,
338
],
[
43,
2,
5
],
[
43,
3,
17
],
[
44,
0,
1
],
[
44,
1,
1
],
[
45,
0,
1
],
[
45,
1,
1
],
[
45,
3,
1
],
[
46,
0,
1
],
[
46,
1,
1
],
[
46,
3,
4
],
[
47,
0,
2
],
[
47,
1,
3
],
[
47,
2,
1
],
[
47,
3,
3
],
[
48,
1,
1
],
[
48,
2,
1
],
[
49,
0,
2
],
[
49,
1,
1
],
[
50,
0,
14
],
[
50,
1,
19
],
[
50,
2,
6
],
[
50,
3,
8
],
[
51,
0,
27
],
[
51,
1,
55
],
[
51,
2,
1
],
[
52,
1,
1
],
[
52,
2,
1
],
[
53,
2,
2
],
[
54,
0,
9
],
[
54,
1,
27
],
[
54,
2,
14
],
[
54,
3,
11
],
[
55,
1,
1
],
[
55,
3,
1
],
[
56,
0,
8
],
[
56,
1,
9
],
[
56,
2,
2
],
[
56,
3,
4
],
[
57,
0,
1
],
[
57,
1,
1
],
[
57,
2,
1
],
[
57,
3,
1
],
[
58,
0,
3
],
[
58,
1,
1
],
[
58,
2,
1
],
[
58,
3,
1
],
[
59,
1,
2
],
[
60,
0,
3
],
[
60,
2,
1
],
[
61,
0,
91
],
[
61,
1,
160
],
[
61,
2,
4
],
[
61,
3,
3
],
[
62,
0,
1
],
[
62,
1,
1
],
[
62,
2,
1
],
[
62,
3,
2
],
[
63,
0,
3
],
[
63,
1,
1
],
[
64,
0,
1
],
[
64,
1,
1
],
[
64,
2,
2
],
[
64,
3,
1
],
[
65,
2,
1
],
[
65,
3,
1
],
[
66,
1,
2
],
[
66,
2,
2
],
[
66,
3,
2
],
[
67,
2,
1
],
[
67,
3,
1
],
[
68,
0,
1
],
[
68,
1,
2
],
[
69,
0,
1
],
[
69,
1,
1
]
],
"rows": [
{
"id": "269901",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Gammaproteobacteria",
"o__Pseudomonadales",
"f__Pseudomonadaceae"
]
}
},
{
"id": "4130483",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "137056",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "1995363",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "939252",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "4380971",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Clostridia",
"o__Clostridiales",
"f__Clostridiaceae",
"g__Clostridium",
"s__butyricum"
]
}
},
{
"id": "1081058",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "4440404",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Betaproteobacteria",
"o__Neisseriales",
"f__Neisseriaceae",
"g__Neisseria"
]
}
},
{
"id": "984924",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "953758",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "4316928",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "152001",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales"
]
}
},
{
"id": "227083",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "4445673",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Clostridia",
"o__Clostridiales",
"f__Clostridiaceae",
"g__Clostridium",
"s__perfringens"
]
}
},
{
"id": "138389",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "4427114",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "153046",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "1059655",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Lactobacillales",
"f__Streptococcaceae",
"g__Streptococcus"
]
}
},
{
"id": "1550056",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "979261",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "12574",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Actinobacteria",
"c__Actinobacteria",
"o__Actinomycetales",
"f__Actinomycetaceae",
"g__Actinomyces",
"s__"
]
}
},
{
"id": "368134",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae"
]
}
},
{
"id": "1039016",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "996487",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "1069592",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "1112200",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "4297222",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes"
]
}
},
{
"id": "923151",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "532163",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Alphaproteobacteria",
"o__Rhodobacterales",
"f__Rhodobacteraceae"
]
}
},
{
"id": "928538",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "1891556",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "114510",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Gammaproteobacteria",
"o__Enterobacteriales",
"f__Enterobacteriaceae"
]
}
},
{
"id": "158047",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "242070",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Gammaproteobacteria",
"o__Pseudomonadales",
"f__Pseudomonadaceae"
]
}
},
{
"id": "149265",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "919490",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "164413",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "767863",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "113773",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Lactobacillales"
]
}
},
{
"id": "128604",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__[Thermi]",
"c__Deinococci",
"o__Deinococcales",
"f__Deinococcaceae",
"g__Deinococcus",
"s__"
]
}
},
{
"id": "99882",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Lactobacillales"
]
}
},
{
"id": "519673",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "630141",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "219151",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Gammaproteobacteria",
"o__Pseudomonadales",
"f__Moraxellaceae",
"g__Acinetobacter"
]
}
},
{
"id": "977188",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "1121111",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "894774",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "441155",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae",
"g__Staphylococcus"
]
}
},
{
"id": "1059977",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Lactobacillales",
"f__Streptococcaceae",
"g__Streptococcus"
]
}
},
{
"id": "552922",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Gammaproteobacteria",
"o__Pseudomonadales",
"f__Moraxellaceae",
"g__Acinetobacter"
]
}
},
{
"id": "2874742",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "1756274",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Lactobacillales",
"f__Enterococcaceae"
]
}
},
{
"id": "4315958",
"metadata": {
"taxonomy": [
"k__Bacteria"
]
}
},
{
"id": "617833",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "2896107",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae"
]
}
},
{
"id": "4365141",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Lactobacillales",
"f__Leuconostocaceae",
"g__Leuconostoc"
]
}
},
{
"id": "356733",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "1067519",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "1068955",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "4438739",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Actinobacteria",
"c__Actinobacteria",
"o__Actinomycetales",
"f__Propionibacteriaceae",
"g__Propionibacterium"
]
}
},
{
"id": "164612",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "4416988",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "1055132",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "187233",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Lactobacillales",
"f__Lactobacillaceae"
]
}
},
{
"id": "New.CleanUp.ReferenceOTU0",
"metadata": {
"taxonomy": [
"k__Bacteria"
]
}
},
{
"id": "New.CleanUp.ReferenceOTU2",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Proteobacteria",
"c__Alphaproteobacteria"
]
}
},
{
"id": "New.CleanUp.ReferenceOTU10",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli",
"o__Bacillales",
"f__Staphylococcaceae"
]
}
},
{
"id": "New.CleanUp.ReferenceOTU27",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
},
{
"id": "New.CleanUp.ReferenceOTU36",
"metadata": {
"taxonomy": [
"k__Bacteria"
]
}
},
{
"id": "New.CleanUp.ReferenceOTU39",
"metadata": {
"taxonomy": [
"k__Bacteria",
"p__Firmicutes",
"c__Bacilli"
]
}
}
],
"columns": [
{
"id": "HMPMockV1.1.Even1",
"metadata": null
},
{
"id": "HMPMockV1.1.Even2",
"metadata": null
},
{
"id": "HMPMockV1.2.Staggered2",
"metadata": null
},
{
"id": "HMPMockV1.2.Staggered1",
"metadata": null
}
]
}"""
_mock_result_table1 = '\n'.join(
[',Dataset,F-measure,Method,Parameters,Taxon Accuracy Rate,'
'Taxon Detection Rate,Precision,Recall,Reference,SampleID,'
'Spearman p,Spearman r,best N alignments,confidence,coverage,'
'e value,e-value,max accepts,min consensus fraction,similarity',
'402,F2,1,rdp,0,0.507167459,-0.304176543,1,1,unite-97-rep-set,'
'm3,0.741153822,0.15430335,,0,,,,,,',
'404,F2,1,rdp,0.1,0.507167459,-0.304176543,1,1,unite-97-rep-set,'
'm3,0.741153822,0.15430335,,0.1,,,,,,',
'405,F2,1,rdp,0.1,0.507167459,-0.304176543,1,1,unite-97-rep-set,'
'm2,0.741153822,0.15430335,,0.1,,,,,,',
'408,F2,0.933333333,rdp,0.2,0.380361021,-0.360486997,0.875,1,'
'unite-97-rep-set,m3,0.634447233,-0.20025047,,0.2,,,,,,',
'411,F2,0.933333333,rdp,0.3,0.380361021,-0.360486997,0.875,1,'
'unite-97-rep-set,m3,0.634447233,-0.20025047,,0.3,,,,,,',
'414,F2,0.933333333,rdp,0.4,0.380361021,-0.360486997,0.875,1,'
'unite-97-rep-set,m3,0.634447233,-0.20025047,,0.4,,,,,,',
'417,F2,0.933333333,rdp,0.5,0.380361021,-0.360486997,0.875,1,'
'unite-97-rep-set,m3,0.634447233,-0.20025047,,0.5,,,,,,',
'2568,F2,0.933333333,uclust,0.51:0.9:3,0.968373131,0.016870865,'
'0.875,1,unite-97-rep-set,m3,0.394741568,0.350438322,,,,,,3,0.51,'
'0.9',
'2559,F2,0.875,uclust,0.51:0.8:3,0.775094897,-0.111550294,'
'0.777777778,1,unite-97-rep-set,m3,0.705299666,0.147297986,,,,,,'
'3,0.51,0.8',
'2560,F2,0.875,uclust,0.51:0.8:3,0.775094897,-0.111550294,'
'0.777777778,1,unite-97-rep-set,m2,0.705299666,0.147297986,,,,,,'
'3,0.51,0.8',
'6,B1,0.666666667,rdp,0.2,0.01020742,0.535556024,0.523809524,'
'0.916666667,gg_13_8_otus,m1,0.005044461,0.57579308,,0.2,,,,,,',
'4,B1,0.647058824,rdp,0,0.006884692,0.54720917,0.5,0.916666667,'
'gg_13_8_otus,m1,0.003014749,0.590459586,,0,,,,,,',
'7,B1,0.647058824,rdp,0.3,0.007585897,0.541731947,0.5,0.916666667'
',gg_13_8_otus,m1,0.003647366,0.580986972,,0.3,,,,,,',
'5,B1,0.628571429,rdp,0.1,0.004629362,0.55772667,0.47826087,'
'0.916666667,gg_13_8_otus,m1,0.001859651,0.601902881,,0.1,,,,,,',
'8,B1,0.628571429,rdp,0.4,0.005449953,0.549139489,0.47826087,'
'0.916666667,gg_13_8_otus,m1,0.004205291,0.562678799,,0.4,,,,,,',
'73,B1,0.628571429,uclust,0.51:0.8:3,0.008500543,0.524553724,'
'0.47826087,0.916666667,gg_13_8_otus,m1,0.002372594,0.590696001'
',,,,,,3,0.51,0.8',
'9,B1,0.611111111,rdp,0.5,0.00572351,0.536241977,0.458333333,'
'0.916666667,gg_13_8_otus,m1,0.002837231,0.571587285,,0.5,,,,,,',
'76,B1,0.594594595,uclust,0.51:0.9:3,0.006571463,0.519167888,'
'0.44,0.916666667,gg_13_8_otus,m1,0.00097195,0.608520351,,,,,,'
'3,0.51,0.9'])
_exp_taxa = '\n'.join(
['o1\tother',
'o3\tAa;Ii;Jj;Kk;Ll;Mm;Nn',
'o2\tAa;Bb;Cc;Dd;Ee;Ff;Hh',
'o4\tAa;Ii;Jj;Kk;Ll;Mm;Oo'])
_obs_taxa = '\n'.join(
['o2\tAa;Bb;Cc;Dd;Ee;Ff;Ii',
'o3\tAa;Ii;Pp;Qq;Rr;Tt',
'o4\tAa;Ii;Jj;Kk;Ll;Mm;Oo',
'o1\tAa;Bb;Cc;Dd;Ee;Ff;Gg'])
self.table1 = Table.from_json(json.loads(_table1))
self.table2 = Table.from_json(json.loads(_table2))
self.table3 = Table.from_json(json.loads(_table3))
        self.mock_result_table1 = pd.read_csv(
            StringIO(_mock_result_table1), index_col=0)
self.tmpdir = mkdtemp()
with open(join(self.tmpdir, 'trueish-taxonomies.tsv'), 'w') as out:
out.write(_exp_taxa)
with open(join(self.tmpdir, 'taxonomy.tsv'), 'w') as out:
out.write(_obs_taxa)
@classmethod
def tearDownClass(self):
rmtree(self.tmpdir)
if __name__ == "__main__":
main()
| bsd-3-clause |
ndingwall/scikit-learn | examples/model_selection/plot_det.py | 10 | 3367 | """
====================================
Detection error tradeoff (DET) curve
====================================
In this example, we compare receiver operating characteristic (ROC) and
detection error tradeoff (DET) curves for different classification algorithms
for the same classification task.
DET curves are commonly plotted in normal deviate scale.
To achieve this, `plot_det_curve` transforms the error rates returned by
:func:`~sklearn.metrics.det_curve` and scales the axes using
:func:`scipy.stats.norm`.
The point of this example is to demonstrate two properties of DET curves,
namely:
1. It might be easier to visually assess the overall performance of different
classification algorithms using DET curves over ROC curves.
Due to the linear scale used for plotting ROC curves, different classifiers
usually only differ in the top left corner of the graph and appear similar
for a large part of the plot. On the other hand, because DET curves
represent straight lines in normal deviate scale. As such, they tend to be
distinguishable as a whole and the area of interest spans a large part of
the plot.
2. DET curves give the user direct feedback of the detection error tradeoff to
aid in operating point analysis.
   The user can deduce directly from the DET-curve plot at which rate the
   false-negative error rate will improve when one is willing to accept an
   increase in the false-positive error rate (or vice-versa).
The plots in this example compare ROC curves on the left side to corresponding
DET curves on the right.
There is no particular reason why these classifiers have been chosen for the
example plot over other classifiers available in scikit-learn.
.. note::
- See :func:`sklearn.metrics.roc_curve` for further information about ROC
curves.
- See :func:`sklearn.metrics.det_curve` for further information about
DET curves.
- This example is loosely based on
:ref:`sphx_glr_auto_examples_classification_plot_classifier_comparison.py`
example.
"""
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import plot_det_curve
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
N_SAMPLES = 1000
classifiers = {
"Linear SVM": make_pipeline(StandardScaler(), LinearSVC(C=0.025)),
"Random Forest": RandomForestClassifier(
max_depth=5, n_estimators=10, max_features=1
),
}
X, y = make_classification(
n_samples=N_SAMPLES, n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=.4, random_state=0)
# prepare plots
fig, [ax_roc, ax_det] = plt.subplots(1, 2, figsize=(11, 5))
for name, clf in classifiers.items():
clf.fit(X_train, y_train)
plot_roc_curve(clf, X_test, y_test, ax=ax_roc, name=name)
plot_det_curve(clf, X_test, y_test, ax=ax_det, name=name)
ax_roc.set_title('Receiver Operating Characteristic (ROC) curves')
ax_det.set_title('Detection Error Tradeoff (DET) curves')
ax_roc.grid(linestyle='--')
ax_det.grid(linestyle='--')
plt.legend()
plt.show()
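# A minimal appendix sketch (not part of the original example): the "normal
# deviate scale" mentioned in the docstring corresponds to mapping the error
# rates returned by `det_curve` through the normal quantile function
# `scipy.stats.norm.ppf`; rates of exactly 0 or 1 map to -inf/+inf.
import scipy.stats as st
from sklearn.metrics import det_curve
rf_scores = classifiers["Random Forest"].predict_proba(X_test)[:, 1]
fpr, fnr, _ = det_curve(y_test, rf_scores)
fpr_deviate, fnr_deviate = st.norm.ppf(fpr), st.norm.ppf(fnr)
print("normal-deviate FPR (first 3):", fpr_deviate[:3])
print("normal-deviate FNR (first 3):", fnr_deviate[:3])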
| bsd-3-clause |
Erotemic/ubelt | ubelt/util_zip.py | 1 | 14646 | """
Abstractions for working with zipfiles and archives
This may be renamed to util_archive in the future.
The :func:`ubelt.split_archive` works with paths that reference a file inside
of an archive (e.g. a zipfile). It splits it into two parts, the full path to
the archive and then the path to the file inside of the archive. By convention
these are separated with either a pathsep or a colon.
The :func:`ubelt.zopen` works to open a file that lives inside of an archive
without the user needing to worry about extracting it first. When possible it
will read it directly from the archive, but in some cases it may extract it to
a temporary directory first.
"""
import io
import os
from os.path import exists, join
from ubelt.util_mixins import NiceRepr
__all__ = ['zopen', 'split_archive']
def split_archive(fpath, ext='.zip'):
"""
    If fpath specifies a file inside a zipfile, it breaks it into two parts: the
    path to the zipfile and the internal path inside the zipfile.
Example:
>>> split_archive('/a/b/foo.txt')
>>> split_archive('/a/b/foo.zip/bar.txt')
>>> split_archive('/a/b/foo.zip/baz/biz.zip/bar.py')
>>> split_archive('archive.zip')
>>> import ubelt as ub
>>> split_archive(ub.Path('/a/b/foo.zip/baz/biz.zip/bar.py'))
>>> split_archive('/a/b/foo.zip/baz.pt/bar.zip/bar.zip', '.pt')
TODO:
Fix got/want for win32
(None, None)
('/a/b/foo.zip', 'bar.txt')
('/a/b/foo.zip/baz/biz.zip', 'bar.py')
('archive.zip', None)
('/a/b/foo.zip/baz/biz.zip', 'bar.py')
('/a/b/foo.zip/baz.pt', 'bar.zip/bar.zip')
"""
import re
fpath = os.fspath(fpath)
pat = '({}[{}/:])'.format(re.escape(ext), re.escape(os.path.sep))
# pat = r'(\'' + ext + '[' + re.escape(os.path.sep) + '/:])'
parts = re.split(pat, fpath, flags=re.IGNORECASE)
if len(parts) > 2:
archivepath = ''.join(parts[:-1])[:-1]
internal = parts[-1]
elif len(parts) == 1:
archivepath = parts[0]
if not archivepath.endswith(ext):
archivepath = None
internal = None
else: # nocover
raise AssertionError('impossible state')
return archivepath, internal
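# A minimal usage sketch (the archive path below is a placeholder): given a
# special path that points inside a zipfile, ``split_archive`` recovers the two
# parts and ``zopen`` (defined below) reads the archived file directly, e.g.
#
#     archive, internal = split_archive('/path/to/data.zip/inside/notes.txt')
#     # -> ('/path/to/data.zip', 'inside/notes.txt')
#     with zopen('/path/to/data.zip/inside/notes.txt', 'r') as file:
#         text = file.read()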
class zopen(NiceRepr):
"""
An abstraction of the normal :func:`open` function that can also handle
reading data directly inside of zipfiles.
This is a file-object like interface [FileObj] --- i.e. it supports the
read and write methods to an underlying resource.
Can open a file normally or open a file within a zip file (readonly).
Tries to read from memory only, but will extract to a tempfile if necessary.
    Just treat the zipfile like a directory, e.g.
    ``/path/to/myzip.zip/compressed/path.txt`` or
    ``/path/to/myzip.zip:compressed/path.txt``.
References:
.. [FileObj] https://docs.python.org/3/glossary.html#term-file-object
TODO:
- [ ] Fast way to open a base zipfile, query what is inside, and
then choose a file to further zopen (and passing along the same
open zipfile reference maybe?).
- [ ] Write mode in some restricted setting?
Args:
fpath (str | PathLike):
path to a file, or a special path that denotes both a
path to a zipfile and a path to a archived file inside of
the zipfile.
mode (str):
            Currently only "r" (readonly) mode is supported.
seekable (bool):
If True, attempts to force "seekability" of the underlying
file-object, for compressed files this will first extract
the file to a temporary location on disk. If False, any underlying
compressed file will be opened directly which may result in the
object being non-seekable.
ext (str):
            The extension of the zipfile. Modify this if a non-standard
extension is used (e.g. for torch packages).
Example:
>>> from ubelt.util_zip import * # NOQA
>>> import pickle
>>> import ubelt as ub
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> dpath = ub.Path(dpath)
>>> data_fpath = dpath / 'test.pkl'
>>> data = {'demo': 'data'}
>>> with open(str(data_fpath), 'wb') as file:
>>> pickle.dump(data, file)
>>> # Write data
>>> import zipfile
>>> zip_fpath = dpath / 'test_zip.archive'
>>> stl_w_zfile = zipfile.ZipFile(os.fspath(zip_fpath), mode='w')
>>> stl_w_zfile.write(os.fspath(data_fpath), os.fspath(data_fpath.relative_to(dpath)))
>>> stl_w_zfile.close()
>>> stl_r_zfile = zipfile.ZipFile(os.fspath(zip_fpath), mode='r')
>>> stl_r_zfile.namelist()
>>> stl_r_zfile.close()
>>> # Test zopen
>>> self = zopen(zip_fpath / 'test.pkl', mode='rb', ext='.archive')
>>> print(self._split_archive())
>>> print(self.namelist())
>>> self.close()
>>> self = zopen(zip_fpath / 'test.pkl', mode='rb', ext='.archive')
>>> recon1 = pickle.loads(self.read())
>>> self.close()
>>> self = zopen(zip_fpath / 'test.pkl', mode='rb', ext='.archive')
>>> recon2 = pickle.load(self)
>>> self.close()
>>> assert recon1 == recon2
>>> assert recon1 is not recon2
Example:
>>> # Test we can load json data from a zipfile
>>> from ubelt.util_zip import * # NOQA
>>> import ubelt as ub
>>> import json
>>> import zipfile
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> infopath = join(dpath, 'info.json')
>>> ub.writeto(infopath, '{"x": "1"}')
>>> zippath = join(dpath, 'infozip.zip')
>>> internal = 'folder/info.json'
>>> with zipfile.ZipFile(zippath, 'w') as myzip:
>>> myzip.write(infopath, internal)
>>> fpath = zippath + '/' + internal
>>> # Test context manager
>>> with zopen(fpath, 'r') as self:
>>> info2 = json.load(self)
>>> assert info2['x'] == '1'
>>> # Test outside of context manager
>>> self = zopen(fpath, 'r')
>>> print(self._split_archive())
>>> info2 = json.load(self)
>>> assert info2['x'] == '1'
>>> # Test nice repr (with zfile)
>>> print('self = {!r}'.format(self))
>>> self.close()
Example:
>>> # Coverage tests --- move to unit-test
>>> from ubelt.util_zip import * # NOQA
>>> import ubelt as ub
>>> import json
>>> import zipfile
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> textpath = join(dpath, 'seekable_test.txt')
>>> text = chr(10).join(['line{}'.format(i) for i in range(10)])
>>> ub.writeto(textpath, text)
>>> zippath = join(dpath, 'seekable_test.zip')
>>> internal = 'folder/seekable_test.txt'
>>> with zipfile.ZipFile(zippath, 'w') as myzip:
>>> myzip.write(textpath, internal)
>>> ub.delete(textpath)
>>> fpath = zippath + '/' + internal
>>> # Test seekable
>>> self_seekable = zopen(fpath, 'r', seekable=True)
>>> assert self_seekable.seekable()
>>> self_seekable.seek(8)
>>> assert self_seekable.readline() == 'ne1' + chr(10)
>>> assert self_seekable.readline() == 'line2' + chr(10)
>>> self_seekable.seek(8)
>>> assert self_seekable.readline() == 'ne1' + chr(10)
>>> assert self_seekable.readline() == 'line2' + chr(10)
>>> # Test non-seekable?
>>> # Sometimes non-seekable files are still seekable
>>> maybe_seekable = zopen(fpath, 'r', seekable=False)
>>> if maybe_seekable.seekable():
>>> maybe_seekable.seek(8)
>>> assert maybe_seekable.readline() == 'ne1' + chr(10)
>>> assert maybe_seekable.readline() == 'line2' + chr(10)
>>> maybe_seekable.seek(8)
>>> assert maybe_seekable.readline() == 'ne1' + chr(10)
>>> assert maybe_seekable.readline() == 'line2' + chr(10)
Example:
>>> # More coverage tests --- move to unit-test
>>> from ubelt.util_zip import * # NOQA
>>> import ubelt as ub
>>> import pytest
>>> dpath = ub.Path.appdir('ubelt/tests/util_zip').ensuredir()
>>> with pytest.raises(OSError):
>>> self = zopen('', 'r')
>>> # Test opening an existing non-zip file
>>> existing_fpath = join(dpath, 'exists.json')
>>> ub.writeto(existing_fpath, '{"x": "1"}')
>>> self = zopen(existing_fpath, 'r')
>>> assert self.read() == '{"x": "1"}'
>>> # Test dir
>>> dir(self)
>>> # Test nice
>>> print(self)
>>> print('self = {!r}'.format(self))
>>> self.close()
>>> # Test open non-zip non-existing file
>>> nonexisting_fpath = join(dpath, 'does-not-exist.txt')
>>> ub.delete(nonexisting_fpath)
>>> with pytest.raises(OSError):
>>> self = zopen(nonexisting_fpath, 'r')
>>> with pytest.raises(NotImplementedError):
>>> self = zopen(nonexisting_fpath, 'w')
>>> # Test nice-repr
>>> self = zopen(existing_fpath, 'r')
>>> print('self = {!r}'.format(self))
>>> # pathological
>>> self = zopen(existing_fpath, 'r')
>>> self._handle = None
>>> dir(self)
"""
def __init__(self, fpath, mode='r', seekable=False, ext='.zip'):
self.fpath = fpath
self.ext = ext
self.name = fpath
self.mode = mode
self._seekable = seekable
self._zfpath = None # points to the base zipfile (if appropriate)
self._temp_dpath = None # for temporary extraction
self._zfile_read = None # underlying opened zipfile object
# The _handle pointer should be a file-like object that this zopen
# object impersonates by forwarding almost every getattr call to it.
self._handle = None
self._open()
@property
def zfile(self):
"""
Access the underlying archive file
"""
if self._zfile_read is None:
import zipfile
archivefile, internal = self._split_archive()
myzip = zipfile.ZipFile(archivefile, 'r')
self._zfile_read = myzip
return self._zfile_read
def namelist(self):
"""
Lists the contents of this zipfile
"""
myzip = self.zfile
namelist = myzip.namelist()
return namelist
def __nice__(self):
if self._zfpath is None:
return 'handle={}, mode={}'.format(str(self._handle), self.mode)
else:
return 'handle={} in zipfpath={}, mode={}'.format(self._handle, self._zfpath, self.mode)
def __getattr__(self, key):
# Expose attributes of wrapped handle
if hasattr(self._handle, key):
assert self._handle is not self
return getattr(self._handle, key)
raise AttributeError(key)
def __dir__(self):
# Expose attributes of wrapped handle
zopen_attributes = {
'namelist',
'zfile',
}
keyset = set(dir(super(zopen, self)))
keyset.update(set(self.__dict__.keys()))
if self._handle is not None:
keyset.update(set(dir(self._handle)))
return sorted(keyset | zopen_attributes)
def _cleanup(self):
# print('self._cleanup = {!r}'.format(self._cleanup))
if self._handle is not None:
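# `closed` resolves through __getattr__ to the wrapped handle's attribute;
# it defaults to True when the handle exposes no such attribute, so close()
# is only attempted on a handle that reports being open.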
if not getattr(self, 'closed', True):
closemethod = getattr(self, 'close', None)
if closemethod is not None: # nocover
closemethod()
closemethod = None
self._handle = None
if self._temp_dpath and exists(self._temp_dpath):
# os.unlink(self._temp_dpath)
import ubelt as ub
ub.delete(self._temp_dpath)
def __del__(self):
self._cleanup()
def _split_archive(self):
archivefile, internal = split_archive(self.fpath, self.ext)
return archivefile, internal
def _open(self):
"""
This logic sets the "_handle" to the appropriate backend object
such that zopen can behave like a standard IO object.
In read-only mode:
* If fpath is a normal file, _handle is the standard `open` object
* If fpath is a seekable zipfile, _handle is an IOWrapper pointing
to the internal data
* If fpath is a non-seekable zipfile, the data is extracted behind
the scenes and a standard `open` object to the extracted file
is given.
In write mode:
* Not implemented; a NotImplementedError is raised.
"""
if 'r' not in self.mode:
raise NotImplementedError('Only read mode is supported for now')
_handle = None
fpath = os.fspath(self.fpath)
if exists(fpath):
_handle = open(fpath, self.mode)
elif self.ext + '/' in fpath or self.ext + os.path.sep in fpath:
archivefile, internal = self._split_archive()
myzip = self.zfile
if self._seekable:
import tempfile
# If we need data to be seekable, then we must extract it to a
# temporary file first.
self._temp_dpath = tempfile.mkdtemp(prefix='zopen_')
temp_fpath = join(self._temp_dpath, internal)
myzip.extract(internal, self._temp_dpath)
_handle = open(temp_fpath, self.mode)
else:
# Try to load data directly from the zipfile
_handle = myzip.open(internal, 'r')
if self.mode == 'rb':
data = _handle.read()
_handle = io.BytesIO(data)
elif self.mode == 'r':
# FIXME: does not always work. handle seems to be closed
# too soon in the case util.zopen(module.__file__).read()
_handle = io.TextIOWrapper(_handle)
else:
raise KeyError(self.mode)
self._zfpath = archivefile
if _handle is None:
raise IOError('file {!r} does not exist'.format(fpath))
self._handle = _handle
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
| apache-2.0 |
arborh/tensorflow | tensorflow/python/keras/engine/data_adapter.py | 3 | 38939 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapter module that convert different input data objects into tf.dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
import math
import random
import numpy as np
import six
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework.ops import composite_tensor
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
try:
from scipy import sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pd = None
try:
# In Python2 unicode is a scalar type
scalar_types = (float, int, str, unicode)
except NameError:
# In Python3 unicode is not present, it always uses string
scalar_types = (float, int, str)
@six.add_metaclass(abc.ABCMeta)
class DataAdapter(object):
"""Base class for input data adapter.
In TF 2.0, tf.data is the preferred API for users to feed in data. In order
to simplify the training code path, all input data objects will be
converted to `tf.data.Dataset` if possible.
Note that since this class is mainly targeted for TF 2.0, it might have a lot
of assumptions under the hood, eg eager context by default, distribution
strategy, etc. In the meantime, some legacy feature support might be dropped,
eg, Iterator from dataset API in v1, etc.
The sample usage of this class is like:
```
x = tf.data.Dataset.range(100)
adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter]
applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)]
if len(applicable_adapters) != 1:
raise ValueError("Expect only one adapter class to handle the input")
dataset = applicable_adapters[0](x).get_dataset()
for data in dataset:
# training
```
"""
@staticmethod
def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
Structure-wise, x and y can be a single object, a list of objects if there
are multiple inputs/outputs, or a dictionary of objects when the
inputs/outputs are named.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
Returns:
boolean
"""
raise NotImplementedError
@abc.abstractmethod
def __init__(self, x, y=None, **kwargs):
"""Create a DataAdapter based on data inputs.
The caller must make sure to call `can_handle()` first before invoking this
method. Providing an unsupported data type will result in unexpected behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the construction
of the tf.dataset.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
- Numpy data might need to have `batch_size` parameter when constructing
the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
`distribution_strategy` is passed, the created dataset need to respect
the strategy.
DataAdapter might choose to ignore any keyword argument if it doesn't
use it, or raise an exception if any required argument is not provided.
"""
if not self.can_handle(x, y):
raise ValueError("{} Cannot handle input {}, {}".format(
self.__class__, x, y))
@abc.abstractmethod
def get_dataset(self):
"""Get a dataset instance for the current DataAdapter.
Note that the returned dataset does not repeat across epochs, so the caller
might need to create a new iterator for the same dataset at the beginning of
each epoch. This behavior might change in the future.
Returns:
A tf.data.Dataset. The caller might use the dataset in different
contexts, e.g. iter(dataset) in eager mode to get the values directly, or,
in graph mode, providing the iterator tensor to the Keras model function.
"""
raise NotImplementedError
@abc.abstractmethod
def get_size(self):
"""Return the size (number of batches) for the dataset created.
For certain types of input data, the number of batches is known, e.g. for
Numpy data the size is (number_of_elements / batch_size), whereas for a
dataset or python generator the size is unknown since it may or may not
have an end state.
Returns:
int, the number of batches for the dataset, or None if it is unknown. The
caller could use this to control the loop of training, show progress bar,
or handle unexpected StopIteration error.
"""
raise NotImplementedError
@abc.abstractmethod
def batch_size(self):
"""Return the batch size of the dataset created.
For certain types of input data, the batch size is known, and even
required, e.g. for a numpy array, whereas for a dataset the batch size is
unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
def representative_batch_size(self):
"""Return a representative size for batches in the dataset.
This is not guaranteed to be the batch size for all batches in the
dataset. It just needs to be a rough approximation for batch sizes in
the dataset.
Returns:
int, a representative size for batches found in the dataset,
or None if it is unknown.
"""
return self.batch_size()
@abc.abstractmethod
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@abc.abstractmethod
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def should_recreate_iterator(self, steps_per_epoch):
"""Returns whether a new iterator should be created every epoch."""
# Only recreate iterator when the data has a fixed length, which will be
# fully consumed every epoch, or has an unknown length (dataset, generator)
# and will be fully consumed (steps_per_epoch is None)
return self.get_size() is not None or steps_per_epoch is None
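# Illustrative sketch (not used by Keras itself): the smallest useful concrete
# DataAdapter, wrapping an already-batched `tf.data.Dataset` whose length the
# caller happens to know. The class name and the `num_batches` argument are
# hypothetical; real dataset handling lives in `DatasetAdapter` further below.
class _ExampleFixedLengthDatasetAdapter(DataAdapter):
  """Minimal concrete DataAdapter, for illustration only."""

  @staticmethod
  def can_handle(x, y=None):
    return isinstance(x, dataset_ops.DatasetV2) and y is None

  def __init__(self, x, y=None, num_batches=None, **kwargs):
    super(_ExampleFixedLengthDatasetAdapter, self).__init__(x, y, **kwargs)
    self._dataset = x
    self._num_batches = num_batches

  def get_dataset(self):
    return self._dataset

  def get_size(self):
    return self._num_batches  # None when the length is unknown.

  def batch_size(self):
    return None  # Batching is assumed to be baked into the dataset.

  def has_partial_batch(self):
    return False

  def partial_batch_size(self):
    return None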
class TensorLikeDataAdapter(DataAdapter):
"""Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy."""
@staticmethod
def can_handle(x, y=None):
# TODO(kaftan): Check performance implications of using a flatten
# here for other types of inputs.
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def _is_tensor(v):
if isinstance(v, tensor_types):
return True
return False
return all(_is_tensor(v) for v in flat_inputs)
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
epochs=1,
steps=None,
shuffle=False,
**kwargs):
super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs)
x = _process_numpy_inputs(x)
y = _process_numpy_inputs(y)
sample_weights = _process_numpy_inputs(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, any_sample_weight, _
) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
if y is not None and any_sample_weight:
inputs = (x, y, sample_weights)
elif y is not None:
# Sample weight is only needed for training, so if y is None, then
# sample_weight is ignored.
inputs = (x, y)
else:
inputs = (x,)
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs))
if len(num_samples) > 1:
msg = "Data cardinality is ambiguous:\n"
for label, data in zip(["x", "y", "sample_weight"], inputs):
msg += " {} sizes: {}\n".format(
label, ", ".join([str(i.shape[0]) for i in nest.flatten(data)]))
msg += "Please provide data which shares the same first dimension."
raise ValueError(msg)
num_samples = num_samples.pop()
# If batch_size is not passed but steps is, calculate from the input data.
if steps and not batch_size:
batch_size = int(math.ceil(num_samples / steps))
if not batch_size:
raise ValueError(
"`batch_size` or `steps` is required for `Tensor` or `NumPy`"
" input data.")
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
num_full_batches = int(num_samples // batch_size)
self._partial_batch_size = num_samples % batch_size
if isinstance(shuffle, str):
shuffle = shuffle.lower()
self._shuffle = shuffle
# Vectorized version of shuffle.
# This is a performance improvement over using `from_tensor_slices`.
# The indices of the data are shuffled and batched, and these indices
# are then zipped with the data and used to extract a batch of the data
# at each step. The performance improvements here come from:
# 1. vectorized batch using gather
# 2. parallelized map
# 3. pipelined permutation generation
# 4. optimized permutation batching
# 5. disabled static optimizations
indices_dataset = dataset_ops.DatasetV2.range(1)
if shuffle != "batch":
indices_dataset = indices_dataset.repeat(epochs)
def permutation(_):
# It turns out to be more performant to make a new set of indices rather
# than reusing the same range Tensor. (presumably because of buffer
# forwarding.)
indices = math_ops.range(num_samples, dtype=dtypes.int64)
if shuffle and shuffle != "batch":
indices = random_ops.random_shuffle(indices)
return indices
# We prefetch a single element. Computing large permutations can take quite
# a while so we don't want to wait for prefetching over an epoch boundary to
# trigger the next permutation. On the other hand, too many simultaneous
# shuffles can contend on a hardware level and degrade all performance.
indices_dataset = indices_dataset.map(permutation).prefetch(1)
def slice_batch_indices(indices):
"""Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is to
slice the Tensor in a Dataset map. (With a condition on the upper index to
handle the partial batch.) However it turns out that coercing the Tensor
into a shape which is divisible by the batch size (and handling the last
partial batch separately) allows for a much more favorable memory access
pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire epoch.
Returns:
A Dataset of batched indices.
"""
num_in_full_batch = num_full_batches * batch_size
first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])
first_k_indices = array_ops.reshape(
first_k_indices, [num_full_batches, batch_size])
flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)
if self._partial_batch_size:
index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(
indices, [num_in_full_batch], [self._partial_batch_size]))
flat_dataset = flat_dataset.concatenate(index_remainder)
if shuffle == "batch":
# 1024 is a magic constant that has not been properly evaluated
flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
return flat_dataset
indices_dataset = indices_dataset.flat_map(slice_batch_indices)
dataset = self.slice_inputs(indices_dataset, inputs)
if shuffle == "batch":
def shuffle_batch(*batch):
return nest.map_structure(random_ops.random_shuffle, batch)
dataset = dataset.map(shuffle_batch)
self._dataset = dataset
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
dataset = dataset_ops.DatasetV2.zip((
indices_dataset,
dataset_ops.DatasetV2.from_tensors(inputs).repeat()
))
def grab_batch(i, data):
return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)
dataset = dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
# Default optimizations are disabled to avoid the overhead of (unnecessary)
# input pipeline graph serialization and deserialization
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
# See b/141490660 for more details.
options.experimental_external_state_policy = (
dataset_ops.ExternalStatePolicy.IGNORE)
dataset = dataset.with_options(options)
return dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._partial_batch_size > 0
def partial_batch_size(self):
return self._partial_batch_size or None
def should_recreate_iterator(self, _):
# An infinite dataset is always created here.
return False
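# Self-contained sketch (hypothetical helper, not used above) of the vectorized
# shuffle-and-gather pattern that TensorLikeDataAdapter.__init__ implements with
# internal APIs: shuffle index vectors, batch the indices, then gather rows.
# Only public TensorFlow 2.x symbols are used; the partial final batch is
# dropped for brevity.
def _sketch_vectorized_shuffle(features, labels, batch_size, epochs=1):
  import tensorflow as tf  # Local import keeps the sketch self-contained.

  num_samples = int(features.shape[0])
  num_full_batches = num_samples // batch_size

  def permutation(_):
    # A fresh shuffled index vector for every epoch.
    return tf.random.shuffle(tf.range(num_samples, dtype=tf.int64))

  indices = tf.data.Dataset.range(epochs).map(permutation).prefetch(1)

  def to_batches(perm):
    # Reshape to [num_full_batches, batch_size] and emit one row per batch.
    full = tf.reshape(perm[:num_full_batches * batch_size], [-1, batch_size])
    return tf.data.Dataset.from_tensor_slices(full)

  index_batches = indices.flat_map(to_batches)
  data = tf.data.Dataset.from_tensors((features, labels)).repeat()
  return tf.data.Dataset.zip((index_batches, data)).map(
      lambda i, d: tf.nest.map_structure(lambda t: tf.gather(t, i, axis=0), d))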
class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
"""Adapter that handles array-like data without forcing it into memory.
As an example, this adapter handles `keras.utils.HDF5Matrix` which holds
datasets that may be too big to fully fit into memory.
Specifically, this adapter handles any Python class which implements:
`__getitem__`, `__len__`, `shape`, and `dtype` with the same meanings
as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
arrays (because that case is handled by the base TensorLikeDataAdapter).
It ignores scipy sparse matrices and Composite Tensors because those are
handled by the CompositeTensorDataAdapter.
It also does not handle lists/tuples of scalars, because those are handled
by the ListsOfScalarsDataAdapter.
"""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_array_like(v):
"""Return True if v is a Tensor, array, or is array-like."""
return (
hasattr(v, "__getitem__") and
hasattr(v, "shape") and
hasattr(v, "dtype") and
hasattr(v, "__len__")
)
if (not TensorLikeDataAdapter.can_handle(x, y) and
not CompositeTensorDataAdapter.can_handle(x, y)):
return all(_is_array_like(v) for v in flat_inputs)
else:
return False
def __init__(self, *args, **kwargs):
logging.warn(
"Keras is training/fitting/evaluating on array-like data. Keras may "
"not be optimized for this format, so if your input data format is "
"supported by TensorFlow I/O (https://github.com/tensorflow/io) we "
"recommend using that to load a Dataset instead.")
super(GenericArrayLikeDataAdapter, self).__init__(*args, **kwargs)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
flat_inputs = nest.flatten(inputs)
def dynamic_shape_like(t):
shape = list(t.shape)
shape[0] = None
return tuple(shape)
flat_dtypes = [inp.dtype for inp in flat_inputs]
contiguous = True
if self._shuffle and self._shuffle != "batch":
contiguous = False
def grab_batch(indices):
"""Grab a batch of data from the inputs."""
# This uses a py_function to avoid converting the array-like
# into a Tensor before slicing it, because converting the array-like
# to a Tensor may force it into memory.
def py_method(ind):
def slice_array(data):
return training_utils.slice_arrays(data, ind.numpy(),
contiguous=contiguous)
return [slice_array(inp) for inp in flat_inputs]
flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)
for v, original_inp in zip(flat_out, flat_inputs):
v.set_shape(dynamic_shape_like(original_inp))
return nest.pack_sequence_as(inputs, flat_out)
dataset = indices_dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
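# Sketch of the py_function slicing idea used in slice_inputs above, expressed
# with public TensorFlow APIs (the function name is hypothetical). It assumes
# `array_like` accepts NumPy-style index arrays; containers such as HDF5-backed
# matrices may restrict the index order.
def _sketch_lazy_slice(array_like, index_batches, out_dtype):
  import tensorflow as tf  # Local import keeps the sketch self-contained.

  def grab(indices):
    def py_slice(ind):
      # Runs eagerly inside py_function, so slicing stays in NumPy/HDF5 land
      # and the whole array is never converted to a single Tensor.
      return array_like[ind.numpy()]
    (out,) = tf.py_function(py_slice, [indices], [out_dtype])
    out.set_shape((None,) + tuple(array_like.shape[1:]))
    return out

  return index_batches.map(
      grab, num_parallel_calls=tf.data.experimental.AUTOTUNE)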
class CompositeTensorDataAdapter(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_composite(v):
# Dataset inherits from CompositeTensor but shouldn't be handled here.
if (isinstance(v, composite_tensor.CompositeTensor) and
not isinstance(v, dataset_ops.DatasetV2)):
return True
# Support Scipy sparse tensors if scipy is installed
if scipy_sparse is not None and scipy_sparse.issparse(v):
return True
return False
def _is_tensor_or_composite(v):
if isinstance(v, (ops.Tensor, np.ndarray)):
return True
return _is_composite(v)
return (any(_is_composite(v) for v in flat_inputs) and
all(_is_tensor_or_composite(v) for v in flat_inputs))
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs):
super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs)
x = _process_numpy_inputs(x)
y = _process_numpy_inputs(y)
sample_weights = _process_numpy_inputs(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, any_sample_weight, _
) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
if y is not None and any_sample_weight:
inputs = (x, y, sample_weights)
elif y is not None:
# Sample weight is only needed for training, so if y is None, then
# sample_weight is ignored.
inputs = (x, y)
else:
inputs = (x,)
dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs)
num_samples = int(nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input data.
if steps and not batch_size:
batch_size = int(math.ceil(num_samples/steps))
if not batch_size:
raise ValueError(
"`batch_size` or `steps` is required for `Tensor` or `NumPy`"
" input data.")
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = (self._size != (num_samples // batch_size))
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size)
self._dataset = dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
class ListsOfScalarsDataAdapter(DataAdapter):
"""Adapter that handles lists of scalars and lists of lists of scalars."""
@staticmethod
def can_handle(x, y=None):
handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x)
handles_y = True
if y is not None:
handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y)
return handles_x and handles_y
@staticmethod
def _is_list_of_scalars(inp):
if isinstance(inp, scalar_types):
return True
if isinstance(inp, (list, tuple)):
return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])
return False
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
shuffle=False,
standardize_function=None,
**kwargs):
super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs)
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if sample_weights is not None:
sample_weights = np.asarray(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
if standardize_function is not None:
x, y, sample_weights = standardize_function(
x=x, y=y, sample_weight=sample_weights)
self._internal_adapter = TensorLikeDataAdapter(
x,
y=y,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
shuffle=shuffle,
**kwargs)
def get_dataset(self):
return self._internal_adapter.get_dataset()
def get_size(self):
return self._internal_adapter.get_size()
def batch_size(self):
return self._internal_adapter.batch_size()
def has_partial_batch(self):
return self._internal_adapter.has_partial_batch()
def partial_batch_size(self):
return self._internal_adapter.partial_batch_size()
class DatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2))
def __init__(self, x, y=None, sample_weights=None, standardize_function=None,
**kwargs):
super(DatasetAdapter, self).__init__(x, y, **kwargs)
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"dataset as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"dataset as input.")
if standardize_function is not None:
x = standardize_function(x)
# Note that the dataset instance is immutable, so it's fine to reuse the
# user-provided dataset.
self._dataset = x
def get_dataset(self):
return self._dataset
def get_size(self):
# The size of the dataset is unknown unless it's fully consumed.
return None
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
class GeneratorDataAdapter(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return ((hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence))
def __init__(self, x, y=None, sample_weights=None, standardize_function=None,
workers=1, use_multiprocessing=False, max_queue_size=10,
**kwargs):
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"python generator as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"python generator as input.")
super(GeneratorDataAdapter, self).__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build the
# dataset, we have to look at a batch to infer the structure.
peek, x = self._peek_and_restore(x)
(peek, wrap_in_tuple, elements_to_keep, partial_sample_weight,
sample_weight_modes, nested_shape, nested_dtypes
) = self._canonicalize_peek(peek, kwargs.get("sample_weight_modes"))
# Note that the dataset API takes a callable that creates a generator object,
# rather than the generator itself, which is why we define a function here.
generator_fn = self._make_callable(x, workers, use_multiprocessing,
max_queue_size)
generator_fn = self._make_bridging_callable(
generator_fn, wrap_in_tuple, peek, elements_to_keep,
partial_sample_weight, sample_weight_modes)
dataset = dataset_ops.DatasetV2.from_generator(
generator_fn, nested_dtypes, output_shapes=nested_shape)
if standardize_function is not None:
dataset = standardize_function(dataset)
if kwargs.get("shuffle", False) and self.get_size() is not None:
dataset = dataset.shuffle(self.get_size())
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset
def _canonicalize_peek(self, peek, sample_weight_modes):
"""Map the peeked batch into a regular form.
This function serves two purposes. First, it determines if per-batch
transformations are needed. Second, it extracts the structure to be used
by Dataset.from_generator.
Args:
peek: The first batch of the user's data
sample_weight_modes: Optional structure indicating how to handle sample
weights. If it is a string, it will be mapped to match the target
structure.
Returns:
An updated peek and various inspection results.
"""
wrap_in_tuple = False
if not isinstance(peek, tuple):
peek, wrap_in_tuple = (peek,), True
if len(peek) not in (1, 2, 3):
raise ValueError(
"Output of generator should be a tuple of 1 or 2 or 3 elements: "
"(input,) or (input, target) or (input, target, sample_weights). "
"Received {}".format(peek))
x_peek, y_peek, sample_weights_peek = list(peek) + [None] * (3 - len(peek))
any_sample_weight, partial_sample_weight = False, False
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights_peek if sample_weights_peek is not None else y_peek,
sample_weight_modes)
if len(peek) == 3:
(sample_weights_peek, any_sample_weight, partial_sample_weight
) = training_utils.handle_partial_sample_weights(
y_peek, sample_weights_peek, sample_weight_modes, check_all_flat=True)
peek = (x_peek, y_peek, sample_weights_peek)
# Users often return None for fields which are not used. For instance:
# (x, y, None) to indicate no sample weights.
if len(peek) >= 2 and y_peek is None:
if any_sample_weight:
raise ValueError("Found sample weights but no targets\n{}".format(peek))
elements_to_keep = 1
elif len(peek) == 3 and not any_sample_weight:
elements_to_keep = 2
else:
elements_to_keep = len(peek)
def dynamic_shape_like(t):
return tuple(None for _ in t.shape)
def convert_for_inspection(t):
if getattr(t, "shape", None) and getattr(t, "dtype", None):
return t
return np.array(t, dtype=backend.floatx())
canonicalized_peek = nest._list_to_tuple( # pylint: disable=protected-access
nest.map_structure(convert_for_inspection, peek[:elements_to_keep]))
nested_dtypes = nest.map_structure(lambda t: t.dtype, canonicalized_peek)
nested_shape = nest.map_structure(dynamic_shape_like, canonicalized_peek)
try:
self._first_batch_size = int(nest.flatten(canonicalized_peek)[0].shape[0])
except IndexError:
raise IndexError("Could not infer batch size from: {}".format(peek))
return (peek, wrap_in_tuple, elements_to_keep, partial_sample_weight,
sample_weight_modes, nested_shape, nested_dtypes)
@staticmethod
def _peek_and_restore(x):
peek = next(x)
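# itertools.chain re-attaches the consumed element below, so the caller still
# iterates over the full stream after this one-element peek.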
return peek, itertools.chain([peek], x)
def _make_callable(self, x, workers, use_multiprocessing, max_queue_size):
"""Create a callable, and possilbly include an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
if use_multiprocessing:
logging.warning(
UserWarning("Using a generator with `use_multiprocessing=True` "
"and multiple workers may duplicate your data. "
"Please consider using the `tf.data.Dataset`."))
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
@staticmethod
def _make_bridging_callable(
generator_fn, wrap_in_tuple, peek, elements_to_keep,
partial_sample_weight, sample_weight_modes):
"""Optional compatibility layer between user's data and Dataset."""
must_prune_nones = (elements_to_keep != len(peek))
try:
nest.assert_same_structure(peek, nest._list_to_tuple(peek)) # pylint: disable=protected-access
must_extract_lists = False
except TypeError:
must_extract_lists = True
# No additional transformations are needed.
if not (wrap_in_tuple or must_extract_lists or must_prune_nones or
partial_sample_weight):
return generator_fn
def wrapped_generator():
"""Remove Nones and lists before invoking Dataset.from_generator."""
for batch in generator_fn():
if wrap_in_tuple:
batch = (batch,)
if must_extract_lists:
batch = nest._list_to_tuple(batch) # pylint: disable=protected-access
if must_prune_nones:
batch = batch[:elements_to_keep]
if partial_sample_weight:
sample_weights, _, _ = training_utils.handle_partial_sample_weights(
batch[1], batch[2], sample_weight_modes, check_all_flat=False)
batch = batch[:2] + (sample_weights,)
yield batch
return wrapped_generator
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
class KerasSequenceAdapter(GeneratorDataAdapter):
"""Adapter that handles `keras.utils.Sequence`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, data_utils.Sequence)
def __init__(self, x, y=None, sample_weights=None, standardize_function=None,
shuffle=False, workers=1, use_multiprocessing=False,
max_queue_size=10, **kwargs):
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"`keras.utils.Sequence` as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"`keras.utils.Sequence` as input.")
self._size = len(x)
self._shuffle_sequence = shuffle
super(KerasSequenceAdapter, self).__init__(
x,
standardize_function=standardize_function,
shuffle=False,  # Shuffle is handled in the _make_callable override.
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
**kwargs)
@staticmethod
def _peek_and_restore(x):
return x[0], x
def _make_callable(self, x, workers, use_multiprocessing, max_queue_size):
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.OrderedEnqueuer(
x, use_multiprocessing=use_multiprocessing,
shuffle=self._shuffle_sequence)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
def generator_fn():
order = range(len(x))
if self._shuffle_sequence:
# Match the shuffle convention in OrderedEnqueuer.
order = list(order)
random.shuffle(order)
for i in order:
yield x[i]
return generator_fn
def get_size(self):
return self._size
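# Hedged example of the input contract KerasSequenceAdapter consumes: a
# `keras.utils.Sequence` subclass exposing `__len__` (number of batches) and
# `__getitem__` (one batch, e.g. an (inputs, targets) tuple). The class below
# is illustrative only and is not referenced elsewhere in this module.
class _ExampleSequence(data_utils.Sequence):

  def __init__(self, x, y, batch_size):
    self.x, self.y, self.batch_size = x, y, batch_size

  def __len__(self):
    return int(math.ceil(len(self.x) / self.batch_size))

  def __getitem__(self, idx):
    sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
    return self.x[sl], self.y[sl]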
ALL_ADAPTER_CLS = [
ListsOfScalarsDataAdapter, TensorLikeDataAdapter,
GenericArrayLikeDataAdapter, DatasetAdapter,
GeneratorDataAdapter, KerasSequenceAdapter, CompositeTensorDataAdapter,
]
def select_data_adapter(x, y):
"""Selects a data adapter than can handle a given x and y."""
adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]
if not adapter_cls:
# TODO(scottzhu): This should be a less implementation-specific error.
raise ValueError(
"Failed to find data adapter that can handle "
"input: {}, {}".format(
_type_name(x), _type_name(y)))
elif len(adapter_cls) > 1:
raise RuntimeError(
"Data adapters should be mutually exclusive for "
"handling inputs. Found multiple adapters {} to handle "
"input: {}, {}".format(
adapter_cls, _type_name(x), _type_name(y)))
return adapter_cls[0]
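# Usage sketch (internal API, subject to change; the helper name is
# illustrative):
def _sketch_select_and_build(x, y, batch_size=32):
  """Pick the matching adapter for (x, y) and materialize its dataset."""
  adapter_cls = select_data_adapter(x, y)
  adapter = adapter_cls(x, y=y, batch_size=batch_size)
  return adapter.get_dataset(), adapter.get_size()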
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
val_types = set(_type_name(key) for key in x.values())
return "({} containing {} keys and {} values)".format(
type(x), key_types, val_types)
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return "({} containing values of types {})".format(
type(x), types)
return str(type(x))
def _process_numpy_inputs(inputs):
"""Process numpy array inputs.
Numpy inputs may be a single numpy array or a list/dict of arrays. They could
also be preprocessed by another library to match the positional order expected
by the model. The result here should be something that can be used to build a
dataset.
Args:
inputs: a single numpy array or a list/tuple/dict of numpy arrays.
Returns:
Numpy arrays that can be used to build a dataset.
"""
if is_none_or_empty(inputs):
return None
flat_inputs = nest.flatten(inputs)
if len(flat_inputs) == 1:
return flat_inputs[0]
def _convert_non_tensor(x):
# Don't call `ops.convert_to_tensor` on all `inputs` because
# `SparseTensors` can't be converted to `Tensor`.
if isinstance(x, np.ndarray):
return ops.convert_to_tensor(x)
return x
inputs = nest.map_structure(_convert_non_tensor, inputs)
# For more complicated structures, we only convert the outermost list to a
# tuple, since the dataset will stack a list but treat elements of a tuple as
# individual elements.
return training_utils.list_to_tuple(inputs)
def is_none_or_empty(inputs):
# Utility method to check whether the input is None or an empty list.
# A plain python `not` check would raise an error like the one below if the
# input is a numpy array:
# "The truth value of an array with more than one element is ambiguous.
# Use a.any() or a.all()"
return inputs is None or not nest.flatten(inputs)
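# For example, `not np.array([1, 2])` raises the ambiguity error quoted above,
# whereas `not nest.flatten(np.array([1, 2]))` is False because the array is
# treated as a single flattened element.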
def broadcast_sample_weight_modes(target_structure, sample_weight_modes):
"""Match sample_weigt_modes structure with output structure."""
if target_structure is None or not nest.flatten(target_structure):
return sample_weight_modes
if isinstance(sample_weight_modes, str):
if isinstance(target_structure, dict):
return {key: sample_weight_modes for key in target_structure.keys()}
return [sample_weight_modes for _ in target_structure]
if sample_weight_modes:
try:
nest.assert_same_structure(
training_utils.list_to_tuple(target_structure),
training_utils.list_to_tuple(sample_weight_modes))
except (ValueError, TypeError):
target_str = str(nest.map_structure(lambda _: "...", target_structure))
mode_str = str(nest.map_structure(lambda _: "...", sample_weight_modes))
# Attempt to coerce sample_weight_modes to the target structure. This
# implicitly depends on the fact that Model flattens outputs for its
# internal representation.
try:
sample_weight_modes = nest.pack_sequence_as(
target_structure, nest.flatten(sample_weight_modes))
logging.warning(
"sample_weight modes were coerced from\n {}\n to \n {}"
.format(target_str, mode_str))
except (ValueError, TypeError):
raise ValueError(
"Unable to match target structure and sample_weight_modes "
"structure:\n {}\n to \n {}".format(target_str, mode_str))
return sample_weight_modes
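# Examples of the broadcasting behaviour (illustrative):
#   broadcast_sample_weight_modes({"a": y_a, "b": y_b}, "temporal")
#     -> {"a": "temporal", "b": "temporal"}
#   broadcast_sample_weight_modes([y1, y2], "temporal")
#     -> ["temporal", "temporal"]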
| apache-2.0 |
shahankhatch/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 252 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
mfjb/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 252 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
ojotoxy/iTerm2 | tools/ply/ply-3.4/ply/ctokens.py | 360 | 3170 | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
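# Usage sketch (kept as comments because this module is only a token library;
# names below are illustrative). Note that several rule names above (e.g.
# t_MODULO, t_INCREMENT, t_INTEGER, t_STRING) do not appear in the `tokens`
# list, so a consuming lexer must extend `tokens` (or rename the rules) before
# ply.lex will accept them:
#
#   import ply.lex as lex
#   from ctokens import *          # the tokens list and the t_* rules
#   tokens = tokens + ['MODULO', 'INCREMENT', 'DECREMENT',
#                      'INTEGER', 'FLOAT', 'STRING', 'CHARACTER']
#   t_ignore = ' \t'
#   def t_error(t):
#       print("Illegal character %r" % t.value[0])
#       t.lexer.skip(1)
#   lexer = lex.lex()
#   lexer.input("x += 42; /* answer */")
#   for tok in lexer:
#       print(tok.type, tok.value)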
| gpl-2.0 |
TuKo/brainiak | brainiak/factoranalysis/htfa.py | 1 | 28629 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hierarchical Topographical Factor Analysis (HTFA)
This implementation is based on the work in [Manning2014-1]_, [Manning2014-2]_,
and [AndersonMJ2016]_.
.. [Manning2014-1] "Topographic factor analysis: a bayesian model for
inferring brain networks from neural data", J. R. Manning,
R. Ranganath, K. A. Norman, and D. M. Blei. PLoS One, vol. 9, no. 5,
2014.
.. [Manning2014-2] "Hierarchical topographic factor analysis", Jeremy. R.
Manning, R. Ranganath, W. Keung, N. B. Turk-Browne, J. D.Cohen,
K. A. Norman, and D. M. Blei. Pattern Recognition in Neuroimaging,
2014 International Workshop on, June 2014.
.. [AndersonMJ2016] "Enabling Factor Analysis on Thousand-Subject Neuroimaging
Datasets",
Michael J. Anderson, Mihai Capotă, Javier S. Turek, Xia Zhu, Theodore L.
Willke, Yida Wang, Po-Hsuan Chen, Jeremy R. Manning, Peter J. Ramadge,
Kenneth A. Norman,
IEEE International Conference on Big Data, 2016.
https://doi.org/10.1109/BigData.2016.7840719
"""
# Authors: Xia Zhu (Intel Labs), Jeremy Manning (Dartmouth College) 2015~2016
import numpy as np
from mpi4py import MPI
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import mean_squared_error
from scipy.spatial import distance
import logging
from .tfa import TFA
from ..utils.utils import from_tri_2_sym, from_sym_2_tri
__all__ = [
"HTFA",
]
logger = logging.getLogger(__name__)
class HTFA(TFA):
"""Hierarchical Topographical Factor Analysis (HTFA)
Given multi-subject data, HTFA factorizes data from each subject as a
spatial factor F and a weight matrix W per subject. Also, at the top
level, it estimates a global template across subjects.
Parameters
----------
K : int
Number of factors to compute.
n_subj : int
Total number of subjects in dataset.
max_global_iter : int, default: 10
Number of global iterations to run the algorithm.
max_local_iter : int, default: 10
Number of local iterations to run on each subject within each
global iteration.
threshold : float, default: 1.0
Tolerance for terminating the parameter estimation.
nlss_method : {'trf', 'dogbox', 'lm'}, default: 'trf'
Non-Linear Least Square (NLSS) algorithm used by scipy.least_squares to
perform minimization. More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
nlss_loss: str or callable, default: 'linear'
Loss function used by scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
jac : {'2-point', '3-point', 'cs', callable}, default: '2-point'
Method of computing the Jacobian matrix.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
x_scale : float or array_like or 'jac', default: 1.0
Characteristic scale of each variable for scipy.least_squares.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
tr_solver: {None, 'exact', 'lsmr'}, default: None
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
More information at
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.least_squares.html
weight_method : {'rr','ols'}, default: 'rr'
Method for estimating weight matrix W given X and F.
'rr' means ridge regression, 'ols' means ordinary least square.
upper_ratio : float, default: 1.8
The upper bound of the ratio between factor's width and brain diameter.
lower_ratio : float, default: 0.02
The lower bound of the ratio between factor's width and brain diameter.
voxel_ratio : float, default: 0.25
The percentage of voxels to sample in each inner iteration.
tr_ratio : float, default: 0.1
The percentage of trs to sample in each inner iteration.
max_voxel : int, default: 5000
The maximum number of voxels to sample in each inner iteration.
max_tr : int, default: 500
The maximum number of trs to sample in each inner iteration.
comm : Intracomm
MPI communication group, default MPI.COMM_WORLD
verbose : boolean, default: False
Verbose mode flag.
Attributes
----------
global_prior_ : 1D array
The global prior on mean and variance of centers and widths.
global_posterior_ : 1D array
The global posterior on mean and variance of centers and widths.
local_posterior_ : 1D array
Local posterior on centers and widths of subjects allocated
to this process.
local_weights_ : 1D array
Local posterior on weights allocated to this process.
Notes
-----
We recommend using data in MNI space to better interpret the global template.
"""
def __init__(self, K, n_subj, max_global_iter=10, max_local_iter=10,
threshold=0.01, nlss_method='trf', nlss_loss='soft_l1',
jac='2-point', x_scale='jac', tr_solver=None,
weight_method='rr', upper_ratio=1.8, lower_ratio=0.02,
voxel_ratio=0.25, tr_ratio=0.1, max_voxel=5000, max_tr=500,
comm=MPI.COMM_WORLD, verbose=False):
self.K = K
self.n_subj = n_subj
self.max_global_iter = max_global_iter
self.max_local_iter = max_local_iter
self.threshold = threshold
self.nlss_method = nlss_method
self.nlss_loss = nlss_loss
self.jac = jac
self.x_scale = x_scale
self.tr_solver = tr_solver
self.weight_method = weight_method
self.upper_ratio = upper_ratio
self.lower_ratio = lower_ratio
self.voxel_ratio = voxel_ratio
self.tr_ratio = tr_ratio
self.max_voxel = max_voxel
self.max_tr = max_tr
self.comm = comm
self.verbose = verbose
def _converged(self):
"""Check convergence based on maximum absolute difference
Returns
-------
converged : boolean
Whether the parameter estimation converged.
max_diff : float
Maximum absolute difference between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
diff = prior - posterior
max_diff = np.max(np.fabs(diff))
if self.verbose:
_, mse = self._mse_converged()
diff_ratio = np.sum(diff ** 2) / np.sum(posterior ** 2)
logger.info(
'htfa prior posterior max diff %f mse %f diff_ratio %f' %
((max_diff, mse, diff_ratio)))
if max_diff > self.threshold:
return False, max_diff
else:
return True, max_diff
def _mse_converged(self):
"""Check convergence based on mean squared difference between
prior and posterior
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
mse = mean_squared_error(prior, posterior,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse
def _map_update(
self,
prior_mean,
prior_cov,
global_cov_scaled,
new_observation):
"""Maximum A Posterior (MAP) update of a parameter
Parameters
----------
prior_mean : float or 1D array
Prior mean of parameters.
prior_cov : float or 1D array
Prior variance of scalar parameter, or
prior covariance of multivariate parameter
global_cov_scaled : float or 1D array
Global prior variance of scalar parameter, or
global prior covariance of multivariate parameter
new_observation : 1D or 2D array, with shape [n_dim, n_subj]
New observations on parameters.
Returns
-------
posterior_mean : float or 1D array
Posterior mean of parameters.
posterior_cov : float or 1D array
Posterior variance of scalar parameter, or
posterior covariance of multivariate parameter
"""
common = np.linalg.inv(prior_cov + global_cov_scaled)
observation_mean = np.mean(new_observation, axis=1)
posterior_mean = prior_cov.dot(common.dot(observation_mean)) +\
global_cov_scaled.dot(common.dot(prior_mean))
posterior_cov =\
prior_cov.dot(common.dot(global_cov_scaled))
return posterior_mean, posterior_cov
def _map_update_posterior(self):
"""Maximum A Posterior (MAP) update of HTFA parameters
Returns
-------
HTFA
Returns the instance itself.
"""
self.global_posterior_ = self.global_prior_.copy()
prior_centers = self.get_centers(self.global_prior_)
prior_widths = self.get_widths(self.global_prior_)
prior_centers_mean_cov = self.get_centers_mean_cov(self.global_prior_)
prior_widths_mean_var = self.get_widths_mean_var(self.global_prior_)
center_size = self.K * self.n_dim
posterior_size = center_size + self.K
for k in np.arange(self.K):
next_centers = np.zeros((self.n_dim, self.n_subj))
next_widths = np.zeros(self.n_subj)
for s in np.arange(self.n_subj):
center_start = s * posterior_size
width_start = center_start + center_size
start_idx = center_start + k * self.n_dim
end_idx = center_start + (k + 1) * self.n_dim
next_centers[:, s] = self.gather_posterior[start_idx:end_idx]\
.copy()
next_widths[s] = self.gather_posterior[width_start + k].copy()
# centers
posterior_mean, posterior_cov = self._map_update(
prior_centers[k].T.copy(),
from_tri_2_sym(prior_centers_mean_cov[k], self.n_dim),
self.global_centers_cov_scaled,
next_centers)
self.global_posterior_[k * self.n_dim:(k + 1) * self.n_dim] =\
posterior_mean.T
start_idx = self.map_offset[2] + k * self.cov_vec_size
end_idx = self.map_offset[2] + (k + 1) * self.cov_vec_size
self.global_posterior_[start_idx:end_idx] =\
from_sym_2_tri(posterior_cov)
# widths
common = 1.0 /\
(prior_widths_mean_var[k] + self.global_widths_var_scaled)
observation_mean = np.mean(next_widths)
tmp = common * self.global_widths_var_scaled
self.global_posterior_[self.map_offset[1] + k] = \
prior_widths_mean_var[k] * common * observation_mean +\
tmp * prior_widths[k]
self.global_posterior_[self.map_offset[3] + k] = \
prior_widths_mean_var[k] * tmp
return self
def _get_gather_offset(self, size):
"""Calculate the offset for gather result from this process
Parameters
----------
size : int
The total number of process.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id
"""
gather_size = np.zeros(size).astype(int)
gather_offset = np.zeros(size).astype(int)
num_local_subjs = np.zeros(size).astype(int)
subject_map = {}
for idx, s in enumerate(np.arange(self.n_subj)):
cur_rank = idx % size
gather_size[cur_rank] += self.prior_size
subject_map[idx] = (cur_rank, num_local_subjs[cur_rank])
num_local_subjs[cur_rank] += 1
for idx in np.arange(size - 1) + 1:
gather_offset[idx] = gather_offset[idx - 1] + gather_size[idx - 1]
tuple_size = tuple(gather_size)
tuple_offset = tuple(gather_offset)
return tuple_size, tuple_offset, subject_map
def _get_weight_size(self, data, n_local_subj):
"""Calculate the size of weight for this process
Parameters
----------
data : a list of 2D array, each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
n_local_subj : int
Number of subjects allocated to this process.
Returns
-------
weight_size : 1D array
The size of total subject weight on this process.
local_weight_offset : 1D array
Number of elements away from the first element
in the combined weight array at which to begin
the new, segmented array for a subject
"""
weight_size = np.zeros(1).astype(int)
local_weight_offset = np.zeros(n_local_subj).astype(int)
for idx, subj_data in enumerate(data):
if idx > 0:
local_weight_offset[idx] = weight_size[0]
weight_size[0] += self.K * subj_data.shape[1]
return weight_size, local_weight_offset
def _get_subject_info(self, n_local_subj, data):
"""Calculate metadata for subjects allocated to this process
Parameters
----------
n_local_subj : int
Number of subjects allocated to this process.
data : list of 2D array. Each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
Returns
-------
max_sample_tr : 1D array
Maximum number of TR to subsample for each subject
max_sample_voxel : 1D array
Maximum number of voxel to subsample for each subject
"""
max_sample_tr = np.zeros(n_local_subj).astype(int)
max_sample_voxel = np.zeros(n_local_subj).astype(int)
for idx in np.arange(n_local_subj):
nvoxel = data[idx].shape[0]
ntr = data[idx].shape[1]
max_sample_voxel[idx] =\
min(self.max_voxel, int(self.voxel_ratio * nvoxel))
max_sample_tr[idx] = min(self.max_tr, int(self.tr_ratio * ntr))
return max_sample_tr, max_sample_voxel
def _get_mpi_info(self):
"""get basic MPI info
Returns
-------
rank : integer
Returns the rank of this process
size : integer
Returns total number of processes
"""
rank = self.comm.Get_rank()
size = self.comm.Get_size()
return rank, size
def _init_prior_posterior(self, rank, R, n_local_subj):
"""set prior for this subject
Parameters
----------
rank : integer
The rank of this process
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
The number of subjects allocated to this process.
Returns
-------
HTFA
Returns the instance itself.
"""
if rank == 0:
idx = np.random.choice(n_local_subj, 1)
self.global_prior_, self.global_centers_cov,\
self.global_widths_var = self.get_template(R[idx[0]])
self.global_centers_cov_scaled =\
self.global_centers_cov / float(self.n_subj)
self.global_widths_var_scaled =\
self.global_widths_var / float(self.n_subj)
self.gather_posterior = np.zeros(self.n_subj * self.prior_size)
self.global_posterior_ = np.zeros(self.prior_size)
else:
self.global_prior_ = np.zeros(self.prior_bcast_size)
self.global_posterior_ = None
self.gather_posterior = None
return self
def _gather_local_posterior(self, use_gather,
gather_size, gather_offset):
"""Gather/Gatherv local posterior
Parameters
----------
use_gather : boolean
Whether to use Gather or Gatherv
gather_size : 1D array
The size of each local posterior
gather_offset : 1D array
The offset of each local posterior
Returns
-------
HTFA
Returns the instance itself.
Notes
-----
We use numpy array rather than generic Python objects for MPI
communication because Gatherv is only supported for the former.
https://pythonhosted.org/mpi4py/usrman/tutorial.html
"""
if use_gather:
self.comm.Gather(self.local_posterior_,
self.gather_posterior, root=0)
else:
target = [
self.gather_posterior,
gather_size,
gather_offset,
MPI.DOUBLE]
self.comm.Gatherv(self.local_posterior_, target)
return self
def _assign_posterior(self):
"""assign posterior to the right prior based on
Hungarian algorithm
Returns
-------
HTFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.global_prior_)
posterior_centers = self.get_centers(self.global_posterior_)
posterior_widths = self.get_widths(self.global_posterior_)
posterior_centers_mean_cov =\
self.get_centers_mean_cov(self.global_posterior_)
posterior_widths_mean_var =\
self.get_widths_mean_var(self.global_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
self.set_centers(self.global_posterior_, posterior_centers[col_ind])
self.set_widths(self.global_posterior_, posterior_widths[col_ind])
# reorder cov/var based on cost assignment
self.set_centers_mean_cov(
self.global_posterior_,
posterior_centers_mean_cov[col_ind])
self.set_widths_mean_var(
self.global_posterior_,
posterior_widths_mean_var[col_ind])
return self
def _update_global_posterior(
self, rank, m, outer_converged):
"""Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged.
"""
if rank == 0:
self._map_update_posterior()
self._assign_posterior()
is_converged, _ = self._converged()
if is_converged:
logger.info("converged at %d outer iter" % (m))
outer_converged[0] = 1
else:
self.global_prior_ = self.global_posterior_
return outer_converged
def _update_weight(self, data, R, n_local_subj, local_weight_offset):
"""update local weight
Parameters
----------
data : list of 2D array, element i has shape=[n_voxel, n_tr]
Subjects' fMRI data.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
Number of subjects allocated to this process.
local_weight_offset : 1D array
Offset of each subject's weights on this process.
Returns
-------
HTFA
Returns the instance itself.
"""
for s, subj_data in enumerate(data):
base = s * self.prior_size
centers = self.local_posterior_[base:base + self.K * self.n_dim]\
.reshape((self.K, self.n_dim))
start_idx = base + self.K * self.n_dim
end_idx = base + self.prior_size
widths = self.local_posterior_[start_idx:end_idx]\
.reshape((self.K, 1))
unique_R, inds = self.get_unique_R(R[s])
F = self.get_factors(unique_R, inds, centers, widths)
start_idx = local_weight_offset[s]
if s == n_local_subj - 1:
self.local_weights_[start_idx:] =\
self.get_weights(subj_data, F).ravel()
else:
end_idx = local_weight_offset[s + 1]
self.local_weights_[start_idx:end_idx] =\
self.get_weights(subj_data, F).ravel()
return self
def _fit_htfa(self, data, R):
"""HTFA main algorithm
Parameters
----------
data : list of 2D array. Each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
rank, size = self._get_mpi_info()
use_gather = self.n_subj % size == 0
n_local_subj = len(R)
max_sample_tr, max_sample_voxel =\
self._get_subject_info(n_local_subj, data)
tfa = []
# init tfa for each subject
for s, subj_data in enumerate(data):
tfa.append(TFA(
max_iter=self.max_local_iter,
threshold=self.threshold,
K=self.K,
nlss_method=self.nlss_method,
nlss_loss=self.nlss_loss,
x_scale=self.x_scale,
tr_solver=self.tr_solver,
weight_method=self.weight_method,
upper_ratio=self.upper_ratio,
lower_ratio=self.lower_ratio,
verbose=self.verbose,
max_num_tr=max_sample_tr[s],
max_num_voxel=max_sample_voxel[s]))
# map data to processes
gather_size, gather_offset, subject_map =\
self._get_gather_offset(size)
self.local_posterior_ = np.zeros(n_local_subj * self.prior_size)
self._init_prior_posterior(rank, R, n_local_subj)
node_weight_size, local_weight_offset =\
self._get_weight_size(data, n_local_subj)
self.local_weights_ = np.zeros(node_weight_size[0])
m = 0
outer_converged = np.array([0])
while m < self.max_global_iter and not outer_converged[0]:
if self.verbose:
logger.info("HTFA global iter %d " % (m))
# root broadcast first 4 fields of global_prior to all nodes
self.comm.Bcast(self.global_prior_, root=0)
# each node loop over its data
for s, subj_data in enumerate(data):
# update tfa with current local prior
tfa[s].set_prior(self.global_prior_[0:self.prior_size].copy())
tfa[s].set_seed(m * self.max_local_iter)
tfa[s].fit(
subj_data,
R=R[s],
template_prior=self.global_prior_.copy())
tfa[s]._assign_posterior()
start_idx = s * self.prior_size
end_idx = (s + 1) * self.prior_size
self.local_posterior_[start_idx:end_idx] =\
tfa[s].local_posterior_
self._gather_local_posterior(
use_gather,
gather_size,
gather_offset)
# root updates global_posterior
outer_converged =\
self._update_global_posterior(rank, m, outer_converged)
self.comm.Bcast(outer_converged, root=0)
m += 1
# update weight matrix for each subject
self._update_weight(
data,
R,
n_local_subj,
local_weight_offset)
return self
def _check_input(self, X, R):
"""Check whether input data and coordinates in right type
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
# Check data type
if not isinstance(X, list):
raise TypeError("Input data should be a list")
if not isinstance(R, list):
raise TypeError("Coordinates should be a list")
# Check the number of subjects
if len(X) < 1:
raise ValueError("Need at leat one subject to train the model.\
Got {0:d}".format(len(X)))
for idx, x in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError("Each subject data should be an array")
if x.ndim != 2:
raise TypeError("Each subject data should be 2D array")
if not isinstance(R[idx], np.ndarray):
raise TypeError(
"Each scanner coordinate matrix should be an array")
if R[idx].ndim != 2:
raise TypeError(
"Each scanner coordinate matrix should be 2D array")
if x.shape[0] != R[idx].shape[0]:
raise TypeError(
"n_voxel should be the same in X[idx] and R[idx]")
return self
def fit(self, X, R):
"""Compute Hierarchical Topographical Factor Analysis Model
[Manning2014-1][Manning2014-2]
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
self._check_input(X, R)
if self.verbose:
logger.info("Start to fit HTFA")
self.n_dim = R[0].shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
# centers,widths
self.prior_size = self.K * (self.n_dim + 1)
# centers,widths,centerCov,widthVar
self.prior_bcast_size =\
self.K * (self.n_dim + 2 + self.cov_vec_size)
self.get_map_offset()
self._fit_htfa(X, R)
return self
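# --- Minimal usage sketch (added for illustration; not part of the original
# module). It assumes the enclosing class above is named HTFA, as its
# docstrings indicate, and that numpy and mpi4py are available. The shapes,
# K and the random data are toy values, so the fitted factors are not
# meaningful; the sketch only shows how fit() is called.
if __name__ == '__main__':
    import numpy as np
    from mpi4py import MPI

    n_voxel, n_tr, n_dim, K, n_subj = 200, 40, 3, 5, 2
    rng = np.random.RandomState(0)
    # One fMRI matrix and one coordinate matrix per subject on this process.
    X = [rng.rand(n_voxel, n_tr) for _ in range(n_subj)]
    R = [rng.rand(n_voxel, n_dim) for _ in range(n_subj)]
    model = HTFA(K=K, n_subj=n_subj, max_global_iter=2, max_local_iter=2,
                 comm=MPI.COMM_WORLD, verbose=True)
    model.fit(X, R)
    # Per-process results live in model.local_posterior_ and model.local_weights_.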
| apache-2.0 |
antoan2/incubator-mxnet | example/ssd/symbol/resnet.py | 53 | 9283 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
bnf : int
Bottle neck channels factor with regard to num_filter
stride : tuple
Stride used in convolution
dim_match : Boolean
True means the number of channels is the same between input and output, otherwise they differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
else:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv2 + shortcut
def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
"""Return ResNet symbol of
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stage
filter_list : list
Channel size of each stage
num_classes : int
Output size of symbol
image_shape : tuple
Input image shape as (channels, height, width)
workspace : int
Workspace used in convolution operator
"""
num_unit = len(units)
assert(num_unit == num_stages)
data = mx.sym.Variable(name='data')
data = mx.sym.identity(data=data, name='id')
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
(nchannel, height, width) = image_shape
if height <= 32: # such as cifar10
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
else: # often expected to be 224 such as imagenet
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
for i in range(num_stages):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
# Although kernel is not used here when global_pool=True, we should put one
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.symbol.Flatten(data=pool1)
fc1 = mx.symbol.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
return mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 28:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnet(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace)
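# --- Minimal usage sketch (added for illustration; not part of the original
# file): build a ResNet-50 classification symbol for 224x224 RGB input and
# check its inferred output shape.
if __name__ == '__main__':
    sym = get_symbol(num_classes=1000, num_layers=50, image_shape='3,224,224')
    _, out_shapes, _ = sym.infer_shape(data=(1, 3, 224, 224))
    print(sym.list_outputs(), out_shapes)  # expect ['softmax_output'] [(1, 1000)]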
| apache-2.0 |
ThomasYeoLab/CBIG | stable_projects/predict_phenotypes/An2022_gcVAE/evaluation/goalDNN/train_goalDNN.py | 1 | 7420 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
Written by Lijun An and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import torch
import argparse
import numpy as np
from torch.nn import L1Loss, CrossEntropyLoss
from config import global_config
from model.goalDNN import goalDNN
from utils.nn_misc import train_dataloader, extract_goaldnn_input
from utils.metrics import subject_acc, subject_mae
from utils.misc import txt2list, load_pkl, create_folder
# loss functions used for training goalDNN model
loss_mae = L1Loss(reduction='mean')
loss_ce = CrossEntropyLoss(reduction='mean')
def train_goalDNN_args_parser():
"""
Parameters for training goalDNN model
"""
parser = argparse.ArgumentParser(prog='TraingoalDNNArgs')
# general parameters
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--GPU', type=int, default=-1)
parser.add_argument('--data_path', type=str, default='/')
parser.add_argument('--checkpoint_path', type=str, default='/')
parser.add_argument('--isSaving', action='store_true', default=False)
parser.add_argument('--cpu', action='store_true', default=False)
# training parameters
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--step', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--in_dim', type=int, default=108)
parser.add_argument('--nb_category', type=int, default=3)
parser.add_argument('--nb_measures', type=int, default=1)
parser.add_argument(
'--hidden_dims', type=list, default=[512, 256, 128, 64, 32])
parser.add_argument('--drop_out', type=float, default=0.1)
parser.add_argument('--weight_decay', type=float, default=1e-4)
# hyperparameters
parser.add_argument('--lr', type=float, default=0.000174138)
parser.add_argument('--lambda_dx', type=float, default=0.9)
parser.add_argument('--lambda_mmse', type=float, default=0.4)
parser.add_argument('--lr_step', type=int, default=95)
parser.add_argument('--h1', type=int, default=512)
parser.add_argument('--h2', type=int, default=64)
parser.add_argument('--h3', type=int, default=512)
parser.add_argument('--h4', type=int, default=512)
parser.add_argument('--h5', type=int, default=512)
parser.add_argument('--nb_layers', type=int, default=5)
args, _ = parser.parse_known_args()
return args
def train_1epoch(args, dataloader, model, optimizer, device, mmse_std):
"""
Train goalDNN model for one epoch
Args:
args (tuple): Parameters
dataloader (class Dataloader): Training dataloader
model (class goalDNN): goalDNN model
optimizer (class Adam): Adam optimizer
device (class device): Device the model training on
mmse_std (tensor): std of MMSE
"""
for _, batch_data in enumerate(dataloader):
batchROIs = batch_data[:, :args.in_dim].float()
batchMMSEs = \
batch_data[:, args.in_dim:args.in_dim+args.nb_measures].float()
batchDXs = batch_data[:, args.in_dim + args.nb_measures:].long()
batchDXs = torch.reshape(batchDXs, (batchDXs.shape[0], ))
# move to GPU
batchROIs = batchROIs.to(device)
batchMMSEs = batchMMSEs.to(device)
batchDXs = batchDXs.to(device)
model.zero_grad()
# forward pass
[MMSE_pred, DX_pred] = model(batchROIs)
# calculate losses
mae_loss = loss_mae(MMSE_pred, batchMMSEs) * mmse_std
crossentropy_loss = \
loss_ce(DX_pred, batchDXs)
loss = args.lambda_mmse * mae_loss + args.lambda_dx * crossentropy_loss
# backward & update parameters
loss.backward()
optimizer.step()
return model, optimizer
def train(args):
"""
Wrapper function for training goalDNN model
Args:
args (tuple): Parameters
"""
if args.isSaving:
create_folder(args.checkpoint_path)
hid_dims = []
hid_dims.append(args.h1)
hid_dims.append(args.h2)
hid_dims.append(args.h3)
hid_dims.append(args.h4)
hid_dims.append(args.h5)
args.hidden_dims = hid_dims[:args.nb_layers]
# Set random seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# Set device to train on
# By default, we use GPU to train model
if args.GPU >= 0:
pass
else:
# let gpuQ select which GPU to train on
pass
if args.cpu:
device = torch.device('cpu')
else:
device = torch.device('cuda')
# load data
# read features of training data ==> 108 brain ROI volumes
ROI_features = txt2list(global_config.ROI_features_path)
# load training data
train_pkl = load_pkl(os.path.join(args.data_path, 'train.pkl'))
val_pkl = load_pkl(os.path.join(args.data_path, 'val.pkl'))
# extract data for model training
trainROIs, trainMMSEs, trainDXs, _ = \
extract_goaldnn_input(train_pkl, ROI_features)
valROIs, valMMSEs, valDXs, val_index = \
extract_goaldnn_input(val_pkl, ROI_features)
valROIs = valROIs.float()
valROIs = valROIs.to(device)
valMMSEs = valMMSEs.float()
valMMSEs = valMMSEs.to(device)
valDXs = valDXs.long()
valDXs = valDXs.to(device)
# build model
model = goalDNN(
in_dim=args.in_dim,
nb_category=args.nb_category,
nb_measures=args.nb_measures,
p_dropout=args.drop_out,
hidden_dims=args.hidden_dims)
model = model.to(device)
# create dataloader
train_data = torch.cat((trainROIs, trainMMSEs, trainDXs), dim=1)
dataloader = train_dataloader(train_data, args.batch_size)
# optimizer
optimizer = torch.optim.Adam(
params=model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
betas=(0.9, 0.999),
eps=1e-7,
amsgrad=False)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[args.lr_step], gamma=0.1)
# best_val
best_model = None
best_val = 1e5
best_valMMSEMAE = 1e5
best_valDXAcc = 0
# begin training
for epoch in range(args.epochs):
args.step = epoch
# train 1 epoch
model.train()
model, optimizer = \
train_1epoch(args, dataloader, model,
optimizer, device, train_pkl['std']['MMSE'])
lr_scheduler.step()
# evaluate model performance on validation set
model.eval()
[valMMSEs_pred, valDXs_pred] = model(valROIs)
_, valMMSEMAE = subject_mae(valMMSEs_pred, valMMSEs,
val_pkl['RID'].values[val_index])
valMMSEMAE *= train_pkl['std']['MMSE']
_, valDXAcc = subject_acc(valDXs_pred, valDXs,
val_pkl['RID'].values[val_index])
if valMMSEMAE / 2 - valDXAcc < best_val:
best_val = valMMSEMAE / 2 - valDXAcc
best_valMMSEMAE = valMMSEMAE
best_valDXAcc = valDXAcc
best_model = model
if args.isSaving:
torch.save(best_model, os.path.join(args.checkpoint_path,
'goalDNN.pt'))
else:
# output via stdout
print(str(best_valMMSEMAE) + ',' + str(best_valDXAcc))
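# --- Illustrative sketch (added; not part of the original script): running
# training programmatically with overridden arguments instead of the command
# line. The paths are placeholders and must point to a directory containing
# the pickled train.pkl/val.pkl files described above.
def _run_example():
    args = train_goalDNN_args_parser()
    args.data_path = '/path/to/data'              # placeholder
    args.checkpoint_path = '/path/to/checkpoint'  # placeholder
    args.isSaving = True
    args.cpu = True   # use CPU for this sketch
    args.epochs = 10
    train(args)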
if __name__ == '__main__':
train(train_goalDNN_args_parser())
| mit |
NelisVerhoef/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 302 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/utils/random.py | 19 | 10413 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = classes[j].astype(int)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
np.random.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
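# --- Illustrative usage sketch (added; not part of the original module): draw
# a sparse label matrix for two outputs with different class distributions.
if __name__ == '__main__':
    toy_classes = [np.array([0, 1]), np.array([0, 2, 3])]
    toy_probs = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    labels = random_choice_csc(n_samples=10, classes=toy_classes,
                               class_probability=toy_probs, random_state=0)
    print(labels.toarray())  # shape (10, 2), one column per output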
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 377 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
google/seqio | seqio/experimental.py | 1 | 20512 | # Copyright 2022 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental utilities for SeqIO."""
import functools
import inspect
from typing import Callable, Iterable, Mapping, Optional, Sequence
from absl import logging
from seqio import dataset_providers
from seqio import preprocessors as seqio_preprocessors
from seqio import utils
import tensorflow as tf
CacheDatasetPlaceholder = dataset_providers.CacheDatasetPlaceholder
Mixture = dataset_providers.Mixture
MixtureRegistry = dataset_providers.MixtureRegistry
ShardInfo = dataset_providers.ShardInfo
Task = dataset_providers.Task
TaskRegistry = dataset_providers.TaskRegistry
def _get_fully_cached_name(
original_name: str,
sequence_length: Mapping[str, int]
) -> str:
"""Generates name for fully-cached task or mixture."""
new_name = f'{original_name}_'
# Find shortest unique prefix.
prefix_len = 0
while (len(set(feat[:prefix_len] for feat in sequence_length)) !=
len(sequence_length)):
prefix_len += 1
new_name += '_'.join(
f'{feat[:prefix_len]}{sequence_length[feat]}' for feat in sequence_length)
return new_name
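# --- Illustrative sketch (added; not part of the original module): the naming
# scheme implemented above applied to a toy example; 'my_task' is a
# placeholder task name.
def _fully_cached_name_example() -> str:
  # Shortest unique prefixes of 'inputs'/'targets' are 'i'/'t', so this
  # returns 'my_task_i512_t62'.
  return _get_fully_cached_name('my_task', {'inputs': 512, 'targets': 62})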
def add_fully_cached_task(
task_name: str,
sequence_length: Mapping[str, int],
disallow_shuffling: bool = False
) -> Task:
"""Adds fully-cached version of the task for given sequence lengths."""
task = TaskRegistry.get(task_name)
new_name = _get_fully_cached_name(task_name, sequence_length)
try:
return TaskRegistry.get(new_name)
except ValueError:
pass
# Rename the sequence lengths to differentiate from the preprocessor kwarg.
fixed_sequence_length = sequence_length
new_preprocessors = []
for prep in task.preprocessors:
if isinstance(prep, CacheDatasetPlaceholder):
continue
def wrapped_prep(ds, output_features, prep=prep):
prep_args = inspect.signature(prep).parameters.keys()
extra_kwargs = {}
if 'sequence_length' in prep_args:
extra_kwargs['sequence_length'] = fixed_sequence_length
if 'output_features' in prep_args:
extra_kwargs['output_features'] = output_features
return prep(ds, **extra_kwargs)
new_preprocessors.append(wrapped_prep)
# Cache at the end of the pipeline.
new_preprocessors.append(CacheDatasetPlaceholder(required=True))
# Add post-cache preprocessor to ensure the runtime sequence length is valid.
def validate_sequence_length(ds, sequence_length):
if (sequence_length is not None and
dict(sequence_length) != dict(fixed_sequence_length)):
raise ValueError(
f"Fully-cached task '{new_name}' can only be loaded with "
f'`sequence_length={fixed_sequence_length}` or `None`. '
f'Given sequence_length={sequence_length}.'
)
return ds
new_preprocessors.append(validate_sequence_length)
logging.info("Registering fully cached Task '%s' with sequence lengths %s.",
new_name, sequence_length)
return TaskRegistry.add(
new_name,
source=task.source,
preprocessors=new_preprocessors,
output_features=task.output_features,
metric_fns=task.metric_fns,
postprocess_fn=task.postprocessor,
shuffle_buffer_size=
None if disallow_shuffling else dataset_providers.SHUFFLE_BUFFER_SIZE
)
def add_fully_cached_mixture(
mixture_name: str,
sequence_length: Mapping[str, int],
disallow_shuffling: bool = False
) -> Mixture:
"""Adds fully-cached version of the mixture for given sequence lengths."""
mixture = MixtureRegistry.get(mixture_name)
new_name = _get_fully_cached_name(mixture_name, sequence_length)
# Register fully-cached tasks for the mixture.
new_tasks = [
add_fully_cached_task(task.name, sequence_length, disallow_shuffling)
for task in mixture.tasks]
logging.info(
"Registering fully cached Mixture '%s' with sequence lengths %s.",
new_name, sequence_length)
return MixtureRegistry.add(
new_name,
[(new_t.name, mixture._task_to_rate[old_t.name]) # pylint:disable=protected-access
for old_t, new_t in zip(mixture.tasks, new_tasks)])
class FewshotDataSource(dataset_providers.DataSource):
"""Combines two splits of another `DataSource` to provide fewshot examples.
Output examples are a dictionary containing a single eval example and a batch
of train examples. For example, with `num_shots=2`:
{
'train': {
'inputs': [
'How many Beatles are there?', 'How many Beatles are alive in 2020?'
],
'targets': ['4', '2']
},
'eval': {
'inputs': 'What city were the Beatles from?'
'targets': 'Liverpool'
}
}
Note that if `num_shots` is 0, the 'train' entry will not be included in the
resulting examples.
"""
def __init__(
self,
original_source: dataset_providers.DataSource,
num_shots: int,
train_preprocessors:
Iterable[Callable[[tf.data.Dataset], tf.data.Dataset]] = (),
eval_preprocessors:
Iterable[Callable[[tf.data.Dataset], tf.data.Dataset]] = (),
train_split: str = 'train',
train_feature_keys: Iterable[str] = ('inputs', 'targets'),
shuffle_buffer_size: int = dataset_providers.SHUFFLE_BUFFER_SIZE,
eval_on_fixed_exemplars: bool = False,
):
"""Initializes FewshotDataSource.
Args:
original_source: a DataSource to produce fewshot examples from.
num_shots: A non-negative integer specifying how many training examples to
include in the inputs.
train_preprocessors: an iterable of preprocessors to run on the train
split before zipping with the eval split.
eval_preprocessors: an iterable of preprocessors to run on the eval
split before zipping with the train split.
train_split: the split to use as training examples.
train_feature_keys: the features to retain in the train split after
preprocessing but before batching zipping with the eval split. This is
necessary to remove variable-length sequences, which cannot be batched.
shuffle_buffer_size: size of the shuffle buffer used when calling
`get_dataset` with shuffle=True. Note that separate shuffles are applied
to the `train` and `eval` splits before they are combined.
eval_on_fixed_exemplars: If True, uses a fixed set of exemplars at
evaluation time. Only effective during evaluation when `split` not
equals `self._train_split`.
"""
self._original_source = original_source
self._num_shots = num_shots
self._train_preprocessors = train_preprocessors
self._eval_preprocessors = eval_preprocessors
self._train_split = train_split
self._train_feature_keys = train_feature_keys
self._shuffle_buffer_size = shuffle_buffer_size
self._eval_on_fixed_exemplars = eval_on_fixed_exemplars
# Override split in property since it may need to be loaded lazily (e.g.,
# for TfdsSource)
super().__init__(splits=())
@property
def splits(self) -> Sequence[str]:
return self._original_source.splits
@property
def supports_arbitrary_sharding(self) -> bool:
return False
def list_shards(self, split: str) -> Sequence[str]:
return self._original_source.list_shards(split)
def get_dataset(
self,
split: str,
shuffle: bool = True,
seed: Optional[int] = None,
shard_info: Optional[ShardInfo] = None
) -> tf.data.Dataset:
shard_info: ShardInfo = shard_info or ShardInfo(0, 1)
if self._train_split not in self._original_source.splits:
raise ValueError(
f"Train split '{self._train_split}' is not one of the original "
f"source splits: {self._original_source.splits}")
if not self._num_shots:
logging.warning(
'Train examples will not be included in the provided dataset since '
'`num_shots` is 0.')
def _apply_preprocessors(ds, preprocessors):
for prep_fn in preprocessors:
ds = prep_fn(ds)
return ds
def _get_maybe_sharded_dataset(
split_: str, shuffle_: bool, seed_: int) -> tf.data.Dataset:
"""Shard at source if possible, but fall back to examples if not."""
num_shards = len(self._original_source.list_shards(split_))
if num_shards >= shard_info.num_shards:
# Shard at the source.
ds = self._original_source.get_dataset(
split=split_, shuffle=shuffle_, seed=seed_, shard_info=shard_info)
else:
# Shard the examples.
ds = self._original_source.get_dataset(
split=split_, shuffle=shuffle_, seed=seed_).shard(
shard_info.num_shards, shard_info.index)
if shuffle_:
# Do our own shuffling here, because original_source.get_dataset does
# not necessarily return an adequately shuffled dataset even when we
# request shuffle=True. For example, TfdsDataSource only shuffles at the
# file shard level, not the individual example level (this amounts to no
# shuffling if there is only one file shard).
ds = ds.shuffle(
buffer_size=self._shuffle_buffer_size,
seed=seed_,
reshuffle_each_iteration=True)
return ds
if seed is None:
train_seed = None
eval_seed = None
else:
# If fixing a seed, train and eval seeds need to be different, otherwise
# in the num_shots=1 case, identical examples would be zipped together.
train_seed = seed
eval_seed = seed + 1
datasets = {}
if self._num_shots:
# Note that we ALWAYS shuffle the train split, even if the user passes
# shuffle=False. This is to prevent the degenerate situation where train
# and eval examples are identical. In the case of shuffle=False, we still
# guarantee determinism by using a fixed seed of 0.
train_ds = _get_maybe_sharded_dataset(
split_=self._train_split,
shuffle_=True,
seed_=train_seed if shuffle else 0)
train_ds = _apply_preprocessors(train_ds, self._train_preprocessors)
train_ds = train_ds.map(
lambda x: {k: x[k] for k in self._train_feature_keys},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_ds = train_ds.repeat().batch(self._num_shots)
if self._eval_on_fixed_exemplars and split != self._train_split:
train_ds = train_ds.take(1).cache().repeat()
datasets['train'] = train_ds
eval_ds = _get_maybe_sharded_dataset(
split_=split, shuffle_=shuffle, seed_=eval_seed)
eval_ds = _apply_preprocessors(eval_ds, self._eval_preprocessors)
datasets['eval'] = eval_ds
return tf.data.Dataset.zip(datasets)
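# --- Illustrative sketch (added; not part of the original module): one way a
# fewshot Task could be assembled from an existing data source. The task name,
# vocabulary and `my_format_fn` (assumed to emit string 'inputs'/'targets'
# features) are placeholders supplied by the caller, not seqio APIs.
def _example_register_fewshot_task(original_source, my_format_fn, vocab):
  fewshot_source = FewshotDataSource(
      original_source=original_source,
      num_shots=4,
      train_preprocessors=[my_format_fn],
      eval_preprocessors=[my_format_fn],
      train_split='train',
      train_feature_keys=('inputs', 'targets'))
  return TaskRegistry.add(
      'my_task_4shot',
      source=fewshot_source,
      preprocessors=[
          fewshot_preprocessor,
          seqio_preprocessors.tokenize,
          seqio_preprocessors.append_eos,
      ],
      output_features={
          'inputs': dataset_providers.Feature(vocabulary=vocab),
          'targets': dataset_providers.Feature(vocabulary=vocab),
      })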
def fewshot_preprocessor(ds,
inputs_prefix='',
targets_prefix='',
example_separator='\n\n',
prompt='',
reverse=False):
"""Create 'inputs' and 'targets' strings for (zero/few)-shot evaluation.
Inputs and targets will be formatted using the given prefixes along with a
separator between each pair. The few-shot examples from the train set will
include both inputs and targets, whereas the eval example (at the end) will
contain only the input followed by the targets prefix.
NOTE: The final target prefix will be right-stripped so that the input does
not end with whitespace.
For example, a 2-shot output might look like:
output: {
'inputs':
'0 How many states in the US? X 1 50 X 0 How many cents in a dollar? X '
'1 100 X 0 Who was in the Beatles? X 1',
'targets': 'John',
'answers': ['John', 'Paul', 'George', 'Ringo']
}
Args:
ds: A dictionary of zipped eval and train tf.data.Datasets, each
preprocessed with at least the fields 'inputs' and 'targets'. Note that
the train dataset will not exist in the 0-shot case.
inputs_prefix: Prefix string for inputs.
targets_prefix: Prefix string for targets.
example_separator: The string separator to delimit different examples.
prompt: Optional prefix for the entire few-shot input. Typically
consists of a natural language description of the task or task
instructions.
reverse: If True, the list of few shot examples is reversed. If used with
eval_on_fixed_exemplars = True and a fixed train_seed, the last N shots
will be the same when num_shots is N or N+M. In other words, additional
shots are prepended instead of appended.
Returns:
A tf.data.Dataset containing 'inputs', 'targets', and any other features
from the evaluation dataset.
"""
@utils.map_over_dataset
def fewshot_map(ex):
if 'train' in ex:
train_examples = tf.stack([
inputs_prefix + ex['train']['inputs'],
targets_prefix + ex['train']['targets'] + example_separator
],
axis=1)
if reverse:
train_examples = tf.reverse(train_examples, [0])
shots = tf.strings.reduce_join(tf.reshape(train_examples, [-1]))
else:
shots = ''
if prompt:
shots = tf.strings.join([prompt, shots], separator=example_separator)
new_ex = {
'inputs':
shots + inputs_prefix + ex['eval']['inputs'] +
targets_prefix.rstrip(),
'targets': ex['eval']['targets'],
}
# Pass through other eval features unchanged.
new_ex.update(
{k: v for k, v in ex['eval'].items() if k not in ('inputs', 'targets')}
)
return new_ex
ds = fewshot_map(ds)
if ds.element_spec['inputs'].shape.rank:
# Unbatch if not a scalar. This is useful for fewshot eval.
ds = ds.unbatch()
return ds
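# --- Illustrative sketch (added; not part of the original module): applying
# fewshot_preprocessor to a toy zipped train/eval dataset with the structure
# produced by FewshotDataSource, to show the resulting 'inputs' string.
def _fewshot_preprocessor_demo():
  train = tf.data.Dataset.from_tensors(
      {'inputs': ['2+2?', '3+3?'], 'targets': ['4', '6']})
  eval_ = tf.data.Dataset.from_tensors({'inputs': '5+5?', 'targets': '10'})
  zipped = tf.data.Dataset.zip({'train': train, 'eval': eval_})
  demo = fewshot_preprocessor(
      zipped, inputs_prefix='Q: ', targets_prefix=' A: ',
      example_separator='\n')
  # -> {'inputs': 'Q: 2+2? A: 4\nQ: 3+3? A: 6\nQ: 5+5? A:', 'targets': '10'}
  return next(iter(demo))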
def add_task_with_sentinels(
task_name: str,
num_sentinels: Optional[int] = 1):
"""Adds sentinels to the inputs/outputs of a task.
Adds num_sentinels sentinels to the end of 'inputs' and at the beginning
of 'targets'. This is known to help fine-tuning span corruption models,
especially on smaller datasets.
This will also rename the task by adding a "_{num_sentinels}_sentinel" suffix
to the task name, but making sure it comes before the following suffixes:
'_train', '_dev', '_test', '.'.
Example before:
'inputs': What is the capital of Illinois?
'targets': Springfield.
Example after:
'inputs': What is the capital of Illinois? <extra_id_0>
'targets': <extra_id_0> Springfield.
Args:
task_name: a str, which is the name of the task you want to have sentinels
added to. Note this will not override the current task, but will create
a new one.
num_sentinels: integer, number of sentinels to add to the end of inputs and the
beginning of targets.
"""
def _append_eos_after_trim_and_preserve(
dataset: tf.data.Dataset,
output_features: Mapping[str, dataset_providers.Feature],
sequence_length: Optional[Mapping[str, int]] = None,
preserve_final_n_tokens_when_trimming: Optional[int] = None
) -> tf.data.Dataset:
"""Version of append_eos_after_trim with option to preserve last n tokens."""
def _maybe_add_eos_and_trim(key: str, value: tf.Tensor) -> tf.Tensor:
if key not in output_features or not output_features[key].add_eos:
return value
eos_id = output_features[key].vocabulary.eos_id
if (sequence_length is not None and
sequence_length.get(key, None) is not None):
max_length = sequence_length[key]
if (preserve_final_n_tokens_when_trimming is not None and
preserve_final_n_tokens_when_trimming > 0):
# Length of the final (trimmed) sequence, including the EOS token.
trimmed_length = tf.minimum(max_length, tf.shape(value)[0] + 1)
# Can't preserve more tokens than the sequence length.
n_tokens_to_preserve = tf.minimum(
preserve_final_n_tokens_when_trimming, trimmed_length - 1)
# pylint: disable=invalid-unary-operand-type
return tf.concat(
[value[:trimmed_length-(n_tokens_to_preserve + 1)],
value[-n_tokens_to_preserve:],
[eos_id]], axis=0)
# pylint: enable=invalid-unary-operand-type
else:
return tf.concat([value[:max_length-1], [eos_id]], axis=0)
else:
return tf.concat([value, [eos_id]], axis=0)
return dataset.map(
lambda ex: {k: _maybe_add_eos_and_trim(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _create_new_task_name(task_name):
"""Creates the new task name with sentinels added."""
sentinel_name = '_{}_sentinel'.format(num_sentinels)
# Avoid messing up evaluation suffixes, so insert the sentinel name right
# before these keywords.
for suffix in ['_train', '_dev', '_test', '_eval', '.']:
idx = task_name.find(suffix)
if idx >= 0:
return task_name[:idx] + sentinel_name + task_name[idx:]
return task_name + sentinel_name
def _sentinel_id(vocabulary, sentinel_num=0):
"""Token ID to use as a sentinel.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
sentinel_num: an optional integer specifying which sentinel should be returned.
By default it returns the first sentinel.
Returns:
an integer
"""
return vocabulary.vocab_size - 1 - sentinel_num
def _add_sentinels(dataset, sequence_length, output_features):
"""Adds sentinels to end of inputs and beginning of targets."""
del sequence_length
input_vocab = output_features['inputs'].vocabulary
target_vocab = output_features['targets'].vocabulary
@utils.map_over_dataset
def _my_fn(x):
sentinels_input = [
_sentinel_id(input_vocab, idx) for idx in range(num_sentinels)]
sentinels_output = [
_sentinel_id(target_vocab, idx) for idx in range(num_sentinels)]
x['inputs'] = tf.concat([x['inputs'], sentinels_input], 0)
x['targets'] = tf.concat([sentinels_output, x['targets']], 0)
return x
return _my_fn(dataset)
def _postprocess_fn_remove_sentinel(string_label, *args, **kwargs):
"""If sentinels are appended to the task, then remove them before eval."""
del args
del kwargs
vocab = task.output_features['targets'].vocabulary
sentinel_str = vocab.decode(
[_sentinel_id(vocab, idx) for idx in range(num_sentinels)])
if string_label.startswith(sentinel_str):
string_label = string_label[len(sentinel_str):].strip()
return string_label
def _wrap_postprocess_fn_remove_sentinel(postprocess_fn):
"""Wrap around another postprocess_fn to remove sentinels first."""
def new_fn(string_label, *args, **kwargs):
string_label = _postprocess_fn_remove_sentinel(
string_label, *args, **kwargs)
return postprocess_fn(string_label, *args, **kwargs)
return new_fn
# Create the new task name.
task = TaskRegistry.get(task_name)
sentinel_task_name = _create_new_task_name(task_name)
# Make the new preprocessors that will insert sentinels and make sure
# sentinels are preserved if the sequences are trimmed.
new_preprocessors = list(task.preprocessors)
if new_preprocessors[-1] is seqio_preprocessors.append_eos_after_trim:
new_eos_function = functools.partial(
_append_eos_after_trim_and_preserve,
preserve_final_n_tokens_when_trimming=num_sentinels)
new_preprocessors[-1] = new_eos_function
new_preprocessors.insert(-1, _add_sentinels)
else:
new_preprocessors.append(_add_sentinels)
# Remove the inserted sentinels in the postprocessor.
postprocess_fn = task.postprocessor
if postprocess_fn is not None:
new_postprocess_fn = _wrap_postprocess_fn_remove_sentinel(postprocess_fn)
else:
new_postprocess_fn = _postprocess_fn_remove_sentinel
TaskRegistry.add(
sentinel_task_name,
source=task.source,
preprocessors=new_preprocessors,
output_features=task.output_features,
postprocess_fn=new_postprocess_fn,
metric_fns=task.metric_fns,
)
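# Usage sketch (hypothetical task name, not part of the original module):
# registering a sentinel variant of an already-registered task, e.g.
#
#   add_task_with_sentinels('my_task_train', num_sentinels=1)
#
# would add a new task named 'my_task_1_sentinel_train' whose inputs end with
# <extra_id_0> and whose targets start with <extra_id_0>.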
| apache-2.0 |
wooga/airflow | airflow/providers/google/cloud/example_dags/example_automl_nl_text_classification.py | 4 | 3431 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator, AutoMLDeleteDatasetOperator, AutoMLDeleteModelOperator,
AutoMLImportDataOperator, AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_TEXT_CLS_BUCKET = os.environ.get(
"GCP_AUTOML_TEXT_CLS_BUCKET", "gs://"
)
# Example values
DATASET_ID = ""
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_classification_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_text_cls_dataset",
"text_classification_dataset_metadata": {"classification_type": "MULTICLASS"},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_TEXT_CLS_BUCKET]}}
default_args = {"start_date": days_ago(1)}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Natural Language Text Classification
with models.DAG(
"example_automl_text_cls",
default_args=default_args,
schedule_interval=None, # Override to match your needs
tags=['example'],
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
dataset_id = (
'{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
)
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(
task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION
)
model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
create_dataset_task >> import_dataset_task >> create_model >> \
delete_model_task >> delete_datasets_task
| apache-2.0 |
mith1979/ansible_automation | applied_python/applied_python/lib/python2.7/site-packages/dns/rrset.py | 98 | 5895 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS RRsets (an RRset is a named rdataset)"""
import dns.name
import dns.rdataset
import dns.rdataclass
import dns.renderer
class RRset(dns.rdataset.Rdataset):
"""A DNS RRset (named rdataset).
RRset inherits from Rdataset, and RRsets can be treated as
Rdatasets in most cases. There are, however, a few notable
exceptions. RRsets have different to_wire() and to_text() method
arguments, reflecting the fact that RRsets always have an owner
name.
"""
__slots__ = ['name', 'deleting']
def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
deleting=None):
"""Create a new RRset."""
super(RRset, self).__init__(rdclass, rdtype, covers)
self.name = name
self.deleting = deleting
def _clone(self):
obj = super(RRset, self)._clone()
obj.name = self.name
obj.deleting = self.deleting
return obj
def __repr__(self):
if self.covers == 0:
ctext = ''
else:
ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
if self.deleting is not None:
dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
else:
dtext = ''
return '<DNS ' + str(self.name) + ' ' + \
dns.rdataclass.to_text(self.rdclass) + ' ' + \
dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'
def __str__(self):
return self.to_text()
def __eq__(self, other):
"""Two RRsets are equal if they have the same name and the same
rdataset
@rtype: bool"""
if not isinstance(other, RRset):
return False
if self.name != other.name:
return False
return super(RRset, self).__eq__(other)
def match(self, name, rdclass, rdtype, covers, deleting=None):
"""Returns True if this rrset matches the specified class, type,
covers, and deletion state."""
if not super(RRset, self).match(rdclass, rdtype, covers):
return False
if self.name != name or self.deleting != deleting:
return False
return True
def to_text(self, origin=None, relativize=True, **kw):
"""Convert the RRset into DNS master file format.
@see: L{dns.name.Name.choose_relativity} for more information
on how I{origin} and I{relativize} determine the way names
are emitted.
Any additional keyword arguments are passed on to the rdata
to_text() method.
@param origin: The origin for relative names, or None.
@type origin: dns.name.Name object
@param relativize: True if names should be relativized
@type relativize: bool"""
return super(RRset, self).to_text(self.name, origin, relativize,
self.deleting, **kw)
def to_wire(self, file, compress=None, origin=None, **kw):
"""Convert the RRset to wire format."""
return super(RRset, self).to_wire(self.name, file, compress, origin,
self.deleting, **kw)
def to_rdataset(self):
"""Convert an RRset into an Rdataset.
@rtype: dns.rdataset.Rdataset object
"""
return dns.rdataset.from_rdata_list(self.ttl, list(self))
def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type, and with
the specified list of rdatas in text format.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if isinstance(rdclass, (str, unicode)):
rdclass = dns.rdataclass.from_text(rdclass)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
r = RRset(name, rdclass, rdtype)
r.update_ttl(ttl)
for t in text_rdatas:
rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
r.add(rd)
return r
def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
"""Create an RRset with the specified name, TTL, class, and type and with
the specified rdatas in text format.
@rtype: dns.rrset.RRset object
"""
return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
def from_rdata_list(name, ttl, rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified list of rdata objects.
@rtype: dns.rrset.RRset object
"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
if len(rdatas) == 0:
raise ValueError("rdata list must not be empty")
r = None
for rd in rdatas:
if r is None:
r = RRset(name, rd.rdclass, rd.rdtype)
r.update_ttl(ttl)
first_time = False
r.add(rd)
return r
def from_rdata(name, ttl, *rdatas):
"""Create an RRset with the specified name and TTL, and with
the specified rdata objects.
@rtype: dns.rrset.RRset object
"""
return from_rdata_list(name, ttl, rdatas)
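# Usage sketch (illustrative only, not part of the original module): the
# helpers above can build an RRset directly from text, e.g.
#
#   rrs = from_text('example.com.', 3600, 'IN', 'A', '10.0.0.1', '10.0.0.2')
#
# which yields an RRset for example.com. with two A rdatas and a 3600s TTL.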
| apache-2.0 |
aestrivex/mne-python | examples/inverse/plot_rap_music.py | 27 | 1913 | """
================================
Compute Rap-Music on evoked data
================================
Compute a Recursively Applied and Projected MUltiple Signal Classification
(RAP-MUSIC) on evoked dataset.
The reference for Rap-Music is:
J.C. Mosher and R.M. Leahy. 1999. Source localization using recursively
applied and projected (RAP) MUSIC. Trans. Sig. Proc. 47, 2
(February 1999), 332-340.
DOI=10.1109/78.740118 http://dx.doi.org/10.1109/78.740118
"""
# Author: Yousra Bekhti <yousra.bekhti@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.beamformer import rap_music
from mne.viz import plot_dipole_locations, plot_dipole_amplitudes
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
# Read the evoked response and crop it
condition = 'Right Auditory'
evoked = mne.read_evokeds(evoked_fname, condition=condition,
baseline=(None, 0))
evoked.crop(tmin=0.05, tmax=0.15) # select N100
evoked.pick_types(meg=True, eeg=False)
# Read the forward solution
forward = mne.read_forward_solution(fwd_fname, surf_ori=True,
force_fixed=False)
# Read noise covariance matrix
noise_cov = mne.read_cov(cov_fname)
dipoles, residual = rap_music(evoked, forward, noise_cov, n_dipoles=2,
return_residual=True, verbose=True)
trans = forward['mri_head_t']
plot_dipole_locations(dipoles, trans, 'sample', subjects_dir=subjects_dir)
plot_dipole_amplitudes(dipoles)
# Plot the evoked data and the residual.
evoked.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]))
residual.plot(ylim=dict(grad=[-300, 300], mag=[-800, 800], eeg=[-6, 8]))
| bsd-3-clause |
GeorgKunk/MachineLearning | MLP_MNIST.py | 1 | 5483 | import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
import numpy as np
import pickle
import os.path
import math
mnist = fetch_mldata("MNIST original")
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
def plot_example_digits(X_train, y_train):
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(10):
img = X_train[y_train == i][0].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
#plot_example_digits(X_train, y_train)
def mlp_store_name(hidden_units):
directory = "storedClassifiers"
name = "MLP_MNIST_"
for i in hidden_units:
name += str(i)
name += "-"
name = name[:-1]
name += "_hiddenUnits_Solverlbfgs.p"
return os.path.join(directory,name)
def run_MLP_on_MNIST(hidden_units=(50,), print_output=True):
"""
Runs a multilayer perceptron with hidden layers specified by hidden_units
on the MNIST handwritten digits dataset and calculates training and
test accuracy.
"""
if os.path.isfile(mlp_store_name(hidden_units)):
if (print_output):
print("loading clf from pickle file")
clf = pickle.load(open(mlp_store_name(hidden_units), "rb" ))
else:
if (print_output):
print("training classifier for " + str(hidden_units))
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=hidden_units, random_state=1)
clf.fit(X_train, y_train)
pickle.dump( clf, open( mlp_store_name(hidden_units), "wb" ) )
if (print_output):
print("Results for classfier with hidden units", str(hidden_units))
print("Training set accuracy: %f" % clf.score(X_train, y_train))
print("Test set accuracy: %f \n" % clf.score(X_test, y_test))
return clf
def print_misclassifications(clf, no_of_images=20):
"""
Print a few examples of misclassified images
"""
y_test_pred = clf.predict(X_test)
misclassified = X_test[y_test != y_test_pred][:no_of_images]
true_labels = y_test[y_test != y_test_pred][:no_of_images]
predicted_labels = y_test_pred[y_test != y_test_pred][:no_of_images]
fig, ax = plt.subplots(nrows=int(math.ceil(no_of_images/5)), ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(no_of_images):
img = misclassified[i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
ax[i].set_title('%d) t: %d p: %d' % (i+1, true_labels[i], predicted_labels[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
#mlpClassifier50=run_MLP_on_MNIST((50,))
#print_misclassifications(mlpClassifier50)
### Compare different layer depths for the same # of neurons ###
hidden_unit_ints=[6,12,18,24,30,36,48,60,96]
hidden_unit_numbers={}
for number_of_layers in range(1,3+1):
hidden_units_tuple_list=[]
for total_hd_un in hidden_unit_ints:
hidden_units_tuple=()
for layer in range(1,number_of_layers+1):
hidden_units_tuple += (int(total_hd_un/number_of_layers),)
hidden_units_tuple_list.append(hidden_units_tuple)
key=str(number_of_layers) + "_hidden_layers"
hidden_unit_numbers[key]=hidden_units_tuple_list
print(hidden_unit_numbers)
for hd_key in hidden_unit_numbers:
for hd_tuple in hidden_unit_numbers[hd_key]:
run_MLP_on_MNIST(hd_tuple)
training_accuracies={key : list([]) for key in hidden_unit_numbers.keys()}
test_accuracies={key : list([]) for key in hidden_unit_numbers.keys()}
del_accuracies={key : list([]) for key in hidden_unit_numbers.keys()}
for hd_key in hidden_unit_numbers:
for hd_tuple in hidden_unit_numbers[hd_key]:
clf = run_MLP_on_MNIST(hd_tuple, False)
training_accuracies[hd_key].append(clf.score(X_train, y_train))
test_accuracies[hd_key].append(clf.score(X_test, y_test))
d_accuracy=clf.score(X_train, y_train)-clf.score(X_test, y_test)
del_accuracies[hd_key].append(d_accuracy)
import matplotlib.cm as cm
color=iter(cm.rainbow(np.linspace(0,1,len(hidden_unit_numbers))))
fig, ax = plt.subplots()
for i,hd_key in enumerate(hidden_unit_numbers):
ax.plot(hidden_unit_ints,
training_accuracies[hd_key], label=hd_key,
color=next(color))
ax.legend()
ax.set_title("Training accuracy")
ax.set_xlabel("# of hidden neurons")
ax.set_ylabel("Training accuracy")
plt.show()
color=iter(cm.rainbow(np.linspace(0,1,len(hidden_unit_numbers))))
fig, ax = plt.subplots()
for hd_key in hidden_unit_numbers:
ax.plot(hidden_unit_ints,
test_accuracies[hd_key],
label=hd_key, color=next(color))
ax.legend()
ax.set_title("Test accuracy")
ax.set_xlabel("# of hidden neurons")
ax.set_ylabel("Test accuracy")
plt.show()
color=iter(cm.rainbow(np.linspace(0,1,len(hidden_unit_numbers))))
fig, ax = plt.subplots()
for i,hd_key in enumerate(hidden_unit_numbers):
ax.plot(hidden_unit_ints,
del_accuracies[hd_key], label=hd_key,
color=next(color))
ax.legend()
ax.set_title("Delta of training and test accuracy")
ax.set_xlabel("# of hidden neurons")
ax.set_ylabel("Training-Testing accurady")
plt.show()
| mit |
LokiCoder/Sick-Beard | lib/guessit/transfo/guess_idnumber.py | 11 | 2404 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
import re
class GuessIdnumber(Transformer):
def __init__(self):
Transformer.__init__(self, -180)
def supported_properties(self):
return ['idNumber']
_idnum = re.compile(r'(?P<idNumber>[a-zA-Z0-9-]{20,})') # 1.0, (0, 0))
def guess_idnumber(self, string, node=None, options=None):
match = self._idnum.search(string)
if match is not None:
result = match.groupdict()
switch_count = 0
DIGIT = 0
LETTER = 1
OTHER = 2
last = LETTER
for c in result['idNumber']:
if c in '0123456789':
ci = DIGIT
elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
ci = LETTER
else:
ci = OTHER
if ci != last:
switch_count += 1
last = ci
switch_ratio = float(switch_count) / len(result['idNumber'])
# only return the result as probable if we alternate often between
# char type (more likely for hash values than for common words)
if switch_ratio > 0.4:
return result, match.span()
return None, None
def process(self, mtree, options=None):
GuessFinder(self.guess_idnumber, 0.4, self.log, options).process_nodes(mtree.unidentified_leaves())
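# Worked example of the switch-ratio heuristic above (illustrative only):
# for a 20-character hash-like token such as '9f86d081884c7d659a2f' the
# character class (digit/letter) switches 12 times out of 20 characters,
# giving a ratio of 0.6 > 0.4, so it is reported as an idNumber. A plain
# 20-letter word also matches the regex, but its ratio is ~0, so it is
# rejected.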
| gpl-3.0 |
NelisVerhoef/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 225 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
alexnowakvila/DCN | code/ConvexHull2d/data_generator.py | 1 | 3831 | import numpy as np
import os
from scipy.spatial import ConvexHull
from sklearn.decomposition import PCA
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
class Generator(object):
def __init__(
self, num_examples_train, num_examples_test,
path_dataset, batch_size
):
self.num_examples_train = num_examples_train
self.num_examples_test = num_examples_test
self.batch_size = batch_size
self.path_dataset = path_dataset
self.input_size = 2
# self.input_size = 3
self.task = 'convex_hull'
scales_train = [1, 2, 3]
scales_test = [5]
self.scales = {'train': scales_train, 'test': scales_test}
self.data = {'train': {}, 'test': {}}
def load_dataset(self):
for mode in ['train', 'test']:
for sc in self.scales[mode]:
path = os.path.join(self.path_dataset, mode + str(sc))
if self.input_size == 2:
path = path + 'def.npz'
elif self.input_size == 3:
path = path + 'def3d.npz'
if os.path.exists(path):
print('Reading {} dataset for {} scales'
.format(mode, sc))
npz = np.load(path)
self.data[mode][sc] = {'x': npz['x'], 'y': npz['y']}
else:
x, y = self.create(scales=sc, mode=mode)
self.data[mode][sc] = {'x': x, 'y': y}
# save
np.savez(path, x=x, y=y)
print('Created {} dataset for {} scales'
.format(mode, sc))
def get_batch(self, batch=0, scales=3, mode="train"):
bs = self.batch_size
batch_x = self.data[mode][scales]['x'][batch * bs: (batch + 1) * bs]
batch_y = self.data[mode][scales]['y'][batch * bs: (batch + 1) * bs]
return batch_x, batch_y
def compute_length(self, scales, mode='train'):
if mode == 'train':
length = np.random.randint(3 * 2 ** scales, 6 * 2 ** (scales) + 1)
max_length = 6 * 2 ** scales
else:
if scales == 2:
length, max_length = 25, 25
elif scales == 3:
length, max_length = 50, 50
elif scales == 4:
length, max_length = 100, 100
elif scales == 5:
length, max_length = 200, 200
return length, max_length
def convexhull_example(self, length, scales):
points = np.random.uniform(0, 1, [length, self.input_size])
target = -1 * np.ones([length])
ch = ConvexHull(points).vertices
argmin = np.argsort(ch)[0]
ch = list(ch[argmin:]) + list(ch[:argmin])
target[:len(ch)] = np.array(ch)
target += 1
return points, target
def create(self, scales=3, mode='train'):
if mode == 'train':
num_examples = self.num_examples_train
else:
num_examples = self.num_examples_test
_, max_length = self.compute_length(scales, mode=mode)
x = -1 * np.ones([num_examples, max_length, self.input_size])
y = np.zeros([num_examples, max_length])
for ex in xrange(num_examples):
length, max_length = self.compute_length(scales, mode=mode)
if self.task == "convex_hull":
x_ex, y_ex = self.convexhull_example(length, scales)
if ex % 500000 == 499999:
print('Created example {}'.format(ex))
else:
raise ValueError("task {} not implemented"
.format(self.task))
x[ex, :length], y[ex, :length] = x_ex, y_ex
return x, y
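# Usage sketch (hypothetical paths and sizes, not part of the original module):
#
#   gen = Generator(num_examples_train=1000, num_examples_test=100,
#                   path_dataset='/tmp/convexhull', batch_size=32)
#   gen.load_dataset()
#   batch_x, batch_y = gen.get_batch(batch=0, scales=3, mode='train')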
| bsd-3-clause |
cauchycui/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 294 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/externals/joblib/__init__.py | 85 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computation-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
has been ran, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
AlexRobson/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 388 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/grid_search.py | 102 | 36232 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
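# Usage sketch for fit_grid_point (illustrative only; the data, estimator and
# split below are assumptions, not part of the original module):
#
#   from sklearn import datasets
#   from sklearn.svm import SVC
#   from sklearn.metrics.scorer import check_scoring
#   iris = datasets.load_iris()
#   X, y = iris.data, iris.target
#   clf = SVC()
#   scorer = check_scoring(clf, scoring='accuracy')
#   rng = np.random.RandomState(0)
#   perm = rng.permutation(len(y))
#   train, test = perm[:100], perm[100:]
#   score, params, n_test = fit_grid_point(
#       X, y, clf, {'C': 1.0}, train, test, scorer, verbose=0)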
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes; in particular it
# does not copy the string for the keys on each instance. By deriving a
# namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. We avoid that by
# telling the Python interpreter that this subclass uses static __slots__
# instead of dynamic attributes. Furthermore, we don't need any additional
# slot in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
        ----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of 4-tuples: (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
    GridSearchCV implements a "fit" method and a "predict" method like
    any classifier, except that the parameters of the classifier
    used to predict are optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
    -----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
    --------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
    RandomizedSearchCV implements a "fit" method and a "predict" method like
    any classifier, except that the parameters of the classifier
    used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
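# A minimal usage sketch for RandomizedSearchCV (illustrative only, not part
# of the library API): it assumes scipy and the bundled iris dataset are
# available, and mirrors the GridSearchCV doctest above with a sampled
# parameter space. The function name and the n_iter value are arbitrary.
def _randomized_search_example():
    from scipy.stats import expon
    from sklearn import datasets
    from sklearn.svm import SVC

    iris = datasets.load_iris()
    # 'C' is drawn from a continuous distribution, 'kernel' from a list, so
    # ParameterSampler samples with replacement (see the class docstring).
    param_distributions = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
    search = RandomizedSearchCV(SVC(), param_distributions, n_iter=8,
                                random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_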
| bsd-3-clause |
isohyt/gensim | gensim/corpora/ucicorpus.py | 68 | 7517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Jonathan Esterhazy <jonathan.esterhazy at gmail.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
University of California, Irvine (UCI) Bag-of-Words format.
http://archive.ics.uci.edu/ml/datasets/Bag+of+Words
"""
from __future__ import with_statement
import logging
from collections import defaultdict
from gensim import utils
from gensim.corpora import Dictionary
from gensim.corpora import IndexedCorpus
from gensim.matutils import MmReader
from gensim.matutils import MmWriter
from six import iteritems, string_types
from six.moves import xrange
logger = logging.getLogger('gensim.corpora.ucicorpus')
class UciReader(MmReader):
def __init__(self, input):
"""
Initialize the reader.
The `input` parameter refers to a file on the local filesystem,
which is expected to be in the UCI Bag-of-Words format.
"""
logger.info('Initializing corpus reader from %s' % input)
self.input = input
with utils.smart_open(self.input) as fin:
self.num_docs = self.num_terms = self.num_nnz = 0
try:
self.num_docs = int(next(fin).strip())
self.num_terms = int(next(fin).strip())
self.num_nnz = int(next(fin).strip())
except StopIteration:
pass
logger.info('accepted corpus with %i documents, %i features, %i non-zero entries' %
(self.num_docs, self.num_terms, self.num_nnz))
def skip_headers(self, input_file):
for lineno, _ in enumerate(input_file):
if lineno == 2:
break
# endclass UciReader
class UciWriter(MmWriter):
"""
Store a corpus in UCI Bag-of-Words format.
This corpus format is identical to MM format, except for
different file headers. There is no format line, and the first
    three lines of the file contain num_docs, num_terms, and num_nnz,
one value per line.
This implementation is based on matutils.MmWriter, and works the same way.
"""
MAX_HEADER_LENGTH = 20 # reserve 20 bytes per header value
FAKE_HEADER = utils.to_utf8(' ' * MAX_HEADER_LENGTH + '\n')
def write_headers(self):
"""
Write blank header lines. Will be updated later, once corpus stats are known.
"""
for _ in range(3):
self.fout.write(self.FAKE_HEADER)
self.last_docno = -1
self.headers_written = True
def update_headers(self, num_docs, num_terms, num_nnz):
"""
Update headers with actual values.
"""
offset = 0
values = [utils.to_utf8(str(n)) for n in [num_docs, num_terms, num_nnz]]
for value in values:
if len(value) > len(self.FAKE_HEADER):
raise ValueError('Invalid header: value too large!')
self.fout.seek(offset)
self.fout.write(value)
offset += len(self.FAKE_HEADER)
@staticmethod
def write_corpus(fname, corpus, progress_cnt=1000, index=False):
writer = UciWriter(fname)
writer.write_headers()
num_terms, num_nnz = 0, 0
docno, poslast = -1, -1
offsets = []
for docno, bow in enumerate(corpus):
if docno % progress_cnt == 0:
logger.info("PROGRESS: saving document #%i" % docno)
if index:
posnow = writer.fout.tell()
if posnow == poslast:
offsets[-1] = -1
offsets.append(posnow)
poslast = posnow
vector = [(x, int(y)) for (x, y) in bow if int(y) != 0] # integer count, not floating weights
max_id, veclen = writer.write_vector(docno, vector)
num_terms = max(num_terms, 1 + max_id)
num_nnz += veclen
num_docs = docno + 1
if num_docs * num_terms != 0:
logger.info("saved %ix%i matrix, density=%.3f%% (%i/%i)" %
(num_docs, num_terms,
100.0 * num_nnz / (num_docs * num_terms),
num_nnz,
num_docs * num_terms))
# now write proper headers, by seeking and overwriting the spaces written earlier
writer.update_headers(num_docs, num_terms, num_nnz)
writer.close()
if index:
return offsets
# endclass UciWriter
class UciCorpus(UciReader, IndexedCorpus):
"""
Corpus in the UCI bag-of-words format.
"""
def __init__(self, fname, fname_vocab=None):
IndexedCorpus.__init__(self, fname)
UciReader.__init__(self, fname)
if fname_vocab is None:
fname_vocab = utils.smart_extension(fname, '.vocab')
self.fname = fname
with utils.smart_open(fname_vocab) as fin:
words = [word.strip() for word in fin]
self.id2word = dict(enumerate(words))
self.transposed = True
def __iter__(self):
"""
Interpret a matrix in UCI bag-of-words format as a streamed gensim corpus
(yielding one document at a time).
"""
for docId, doc in super(UciCorpus, self).__iter__():
yield doc # get rid of docId, return the sparse vector only
def create_dictionary(self):
"""
Utility method to generate gensim-style Dictionary directly from
the corpus and vocabulary data.
"""
dictionary = Dictionary()
# replace dfs with defaultdict to avoid downstream KeyErrors
# uci vocabularies may contain terms that are not used in the document data
dictionary.dfs = defaultdict(int)
dictionary.id2token = self.id2word
dictionary.token2id = dict((v, k) for k, v in iteritems(self.id2word))
dictionary.num_docs = self.num_docs
dictionary.num_nnz = self.num_nnz
for docno, doc in enumerate(self):
if docno % 10000 == 0:
logger.info('PROGRESS: processing document %i of %i' % (docno, self.num_docs))
for word, count in doc:
dictionary.dfs[word] += 1
dictionary.num_pos += count
return dictionary
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=10000, metadata=False):
"""
Save a corpus in the UCI Bag-of-Words format.
There are actually two files saved: `fname` and `fname.vocab`, where
`fname.vocab` is the vocabulary file.
This function is automatically called by `UciCorpus.serialize`; don't
call it directly, call `serialize` instead.
"""
if id2word is None:
logger.info("no word id mapping provided; initializing from corpus")
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
else:
            num_terms = 1 + max([-1] + list(id2word.keys()))
# write out vocabulary
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info("saving vocabulary of %i words to %s" % (num_terms, fname_vocab))
with utils.smart_open(fname_vocab, 'wb') as fout:
for featureid in xrange(num_terms):
fout.write(utils.to_utf8("%s\n" % id2word.get(featureid, '---')))
logger.info("storing corpus in UCI Bag-of-Words format: %s" % fname)
return UciWriter.write_corpus(fname, corpus, index=True, progress_cnt=progress_cnt)
# endclass UciCorpus
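# Minimal round-trip sketch (illustrative only, not part of gensim): write a
# tiny bag-of-words corpus in UCI format and read it back. The /tmp path and
# the toy vocabulary below are arbitrary placeholders.
def _uci_corpus_example(fname='/tmp/example_uci_corpus.uci'):
    corpus = [[(0, 2), (1, 1)], [(1, 3), (2, 1)]]
    id2word = {0: 'alpha', 1: 'beta', 2: 'gamma'}
    # serialize() (inherited from IndexedCorpus) calls save_corpus() above and
    # writes `fname`, `fname.vocab` and an offset index next to it.
    UciCorpus.serialize(fname, corpus, id2word=id2word)
    loaded = UciCorpus(fname)
    return list(loaded), loaded.create_dictionary()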
| lgpl-2.1 |
nburn42/tensorflow | tensorflow/python/data/__init__.py | 3 | 1304 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.data.Dataset` API for input pipelines.
See the @{$datasets$Importing Data} Programmer's Guide for an overview.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.ops.iterator_ops import Iterator
from tensorflow.python.data.ops.readers import FixedLengthRecordDataset
from tensorflow.python.data.ops.readers import TextLineDataset
from tensorflow.python.data.ops.readers import TFRecordDataset
# pylint: enable=unused-import
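# Minimal illustrative sketch (not part of this package): build a small input
# pipeline from an in-memory tensor and drain it with a one-shot iterator.
# Assumes TF 1.x graph mode; the numbers below are arbitrary.
def _dataset_pipeline_example():
  import tensorflow as tf

  dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).map(
      lambda x: x * 2).batch(2)
  next_batch = dataset.make_one_shot_iterator().get_next()
  with tf.Session() as sess:
    while True:
      try:
        print(sess.run(next_batch))  # prints [2 4] and then [6 8]
      except tf.errors.OutOfRangeError:
        break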
| apache-2.0 |
CIFASIS/pylearn2 | pylearn2/scripts/datasets/make_downsampled_stl10.py | 44 | 3245 | """
Makes a version of the STL-10 dataset that has been downsampled by a factor of
3 along both axes.
This is to mimic the first step of preprocessing used in
'An Analysis of Single-Layer Networks in Unsupervised Feature Learning'
by Adam Coates, Honglak Lee, and Andrew Y. Ng
This script also translates the data to lie in [-127.5, 127.5] instead of
[0,255]. This makes it play nicer with some of pylearn's visualization tools.
"""
from __future__ import print_function
from theano.compat.six.moves import xrange
from pylearn2.datasets.stl10 import STL10
from pylearn2.datasets.preprocessing import Downsample
from pylearn2.utils import string_utils as string
from pylearn2.utils import serial
import numpy as np
print('Preparing output directory...')
data_dir = string.preprocess('${PYLEARN2_DATA_PATH}')
downsampled_dir = data_dir + '/stl10_32x32'
serial.mkdir( downsampled_dir )
README = open(downsampled_dir + '/README','w')
README.write("""
The .pkl files in this directory may be opened in python using
cPickle, pickle, or pylearn2.serial.load. They contain pylearn2
Dataset objects defining the STL-10 dataset, but downsampled to
size 32x32 and translated to lie in [-127.5, 127.5 ].
They were created with the pylearn2 script make_downsampled_stl10.py
All other files in this directory, including this README, were
created by the same script and are necessary for the other files
to function correctly.
""")
README.close()
preprocessor = Downsample(sampling_factor = [3, 3] )
#Unlabeled dataset is huge, so do it in chunks
#(After downsampling it should be small enough to work with)
final_unlabeled = np.zeros((100*1000,32*32*3),dtype='float32')
for i in xrange(10):
print('Loading unlabeled chunk '+str(i+1)+'/10...')
unlabeled = STL10(which_set = 'unlabeled', center = True,
example_range = (i * 10000, (i+1) * 10000))
print('Preprocessing unlabeled chunk...')
print('before ',(unlabeled.X.min(),unlabeled.X.max()))
unlabeled.apply_preprocessor(preprocessor)
print('after ',(unlabeled.X.min(), unlabeled.X.max()))
final_unlabeled[i*10000:(i+1)*10000,:] = unlabeled.X
unlabeled.set_design_matrix(final_unlabeled)
print('Saving unlabeled set...')
unlabeled.enable_compression()
unlabeled.use_design_loc(downsampled_dir + '/unlabeled.npy')
serial.save(downsampled_dir+'/unlabeled.pkl',unlabeled)
del unlabeled
import gc
gc.collect()
print('Loading testing set...')
test = STL10(which_set = 'test', center = True)
print('Preprocessing testing set...')
print('before ',(test.X.min(),test.X.max()))
test.apply_preprocessor(preprocessor)
print('after ',(test.X.min(), test.X.max()))
print('Saving testing set...')
test.enable_compression()
test.use_design_loc(downsampled_dir + '/test.npy')
serial.save(downsampled_dir+'/test.pkl',test)
del test
print('Loading training set...')
train = STL10(which_set = 'train', center = True)
print('Preprocessing training set...')
print('before ',(train.X.min(),train.X.max()))
train.apply_preprocessor(preprocessor)
print('after ',(train.X.min(), train.X.max()))
print('Saving training set...')
train.enable_compression()
train.use_design_loc(downsampled_dir + '/train.npy')
serial.save(downsampled_dir+'/train.pkl',train)
del train
| bsd-3-clause |
snnn/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans.py | 10 | 20270 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A canned Estimator for k-means clustering."""
# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, loss_tensor, tolerance):
"""Creates a _LossRelativeChangeHook.
Args:
loss_tensor: A scalar tensor of the loss value.
tolerance: A relative tolerance of loss change between iterations.
"""
self._loss_tensor = loss_tensor
self._tolerance = tolerance
self._prev_loss = None
def before_run(self, run_context):
del run_context # unused
return session_run_hook.SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
loss = run_values.results
assert loss is not None
if self._prev_loss:
relative_change = (
abs(loss - self._prev_loss) / (1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes the cluster centers.
The chief repeatedly invokes an initialization op until all cluster centers
are initialized. The workers wait for the initialization phase to complete.
"""
def __init__(self, init_op, is_initialized_var, is_chief):
"""Creates an _InitializeClustersHook.
Args:
init_op: An op that, when run, will choose some initial cluster centers.
This op may need to be run multiple times to choose all the centers.
is_initialized_var: A boolean variable reporting whether all initial
centers have been chosen.
is_chief: A boolean specifying whether this task is the chief.
"""
self._init_op = init_op
self._is_initialized_var = is_initialized_var
self._is_chief = is_chief
def after_create_session(self, session, coord):
del coord # unused
assert self._init_op.graph is ops.get_default_graph()
assert self._is_initialized_var.graph is self._init_op.graph
while True:
try:
if session.run(self._is_initialized_var):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_features_if_necessary(features, feature_columns):
"""Helper function to convert the input points into a usable format.
Args:
features: The input features.
    feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column instances
that can be passed to `tf.feature_column.input_layer`. If this is None,
all features will be used.
Returns:
If `features` is a dict of `k` features (optionally filtered by
`feature_columns`), each of which is a vector of `n` scalars, the return
value is a Tensor of shape `(n, k)` representing `n` input points, where the
items in the `k` dimension are sorted lexicographically by `features` key.
If `features` is not a dict, it is returned unmodified.
"""
if not isinstance(features, dict):
return features
if feature_columns:
return fc.input_layer(features, feature_columns)
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
return array_ops.concat([features[k] for k in keys], axis=1)
class _ModelFn(object):
"""Model function for the estimator."""
def __init__(self, num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns):
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._relative_tolerance = relative_tolerance
self._feature_columns = feature_columns
def model_fn(self, features, mode, config):
"""Model function for the estimator.
Note that this does not take a `labels` arg. This works, but `input_fn` must
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See `tf.estimator.Estimator`.
mode: See `tf.estimator.Estimator`.
config: See `tf.estimator.Estimator`.
Returns:
A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
* `loss`: The sum of the squared distances from each input point to its
closest center.
* `eval_metric_ops`: Maps `SCORE` to `loss`.
* `predictions`: Maps `ALL_DISTANCES` to the distance from each input
point to each cluster center; maps `CLUSTER_INDEX` to the index of
the closest cluster center for each input point.
"""
# input_points is a single Tensor. Therefore, the sharding functionality
# in clustering_ops is unused, and some of the values below are lists of a
# single item.
input_points = _parse_features_if_necessary(features, self._feature_columns)
# Let N = the number of input_points.
# all_distances: A list of one matrix of shape (N, num_clusters). Each value
# is the distance from an input point to a cluster center.
# model_predictions: A list of one vector of shape (N). Each value is the
# cluster id of an input point.
# losses: Similar to cluster_idx but provides the distance to the cluster
# center.
# is_initialized: scalar indicating whether the initial cluster centers
# have been chosen; see init_op.
# init_op: an op to choose the initial cluster centers. A single worker
# repeatedly executes init_op until is_initialized becomes True.
# training_op: an op that runs an iteration of training, either an entire
# Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
# may execute this op, but only after is_initialized becomes True.
(all_distances, model_predictions, losses, is_initialized, init_op,
training_op) = clustering_ops.KMeans(
inputs=input_points,
num_clusters=self._num_clusters,
initial_clusters=self._initial_clusters,
distance_metric=self._distance_metric,
use_mini_batch=self._use_mini_batch,
mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
).training_graph()
loss = math_ops.reduce_sum(losses)
summary.scalar('loss/raw', loss)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = control_flow_ops.with_dependencies([training_op, incr_step],
loss)
training_hooks = [
_InitializeClustersHook(init_op, is_initialized, config.is_chief)
]
if self._relative_tolerance is not None:
training_hooks.append(
_LossRelativeChangeHook(loss, self._relative_tolerance))
export_outputs = {
KMeansClustering.ALL_DISTANCES:
export_output.PredictOutput(all_distances[0]),
KMeansClustering.CLUSTER_INDEX:
export_output.PredictOutput(model_predictions[0]),
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(model_predictions[0])
}
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions={
KMeansClustering.ALL_DISTANCES: all_distances[0],
KMeansClustering.CLUSTER_INDEX: model_predictions[0],
},
loss=loss,
train_op=training_op,
eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
training_hooks=training_hooks,
export_outputs=export_outputs)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
Example:
```
import numpy as np
import tensorflow as tf
num_points = 100
dimensions = 2
points = np.random.uniform(0, 1000, [num_points, dimensions])
def input_fn():
return tf.train.limit_epochs(
tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)
num_clusters = 5
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=num_clusters, use_mini_batch=False)
# train
num_iterations = 10
previous_centers = None
  for _ in range(num_iterations):
    kmeans.train(input_fn)
    cluster_centers = kmeans.cluster_centers()
    if previous_centers is not None:
      print('delta:', cluster_centers - previous_centers)
    previous_centers = cluster_centers
    print('score:', kmeans.score(input_fn))
  print('cluster centers:', cluster_centers)
  # map the input points to their clusters
  cluster_indices = list(kmeans.predict_cluster_index(input_fn))
  for i, point in enumerate(points):
    cluster_index = cluster_indices[i]
    center = cluster_centers[cluster_index]
    print('point:', point, 'is in cluster', cluster_index, 'centered at', center)
```
The `SavedModel` saved by the `export_savedmodel` method does not include the
cluster centers. However, the cluster centers may be retrieved by the
latest checkpoint saved during training. Specifically,
```
kmeans.cluster_centers()
```
is equivalent to
```
tf.train.load_variable(
kmeans.model_dir, KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
```
"""
# Valid values for the distance_metric constructor argument.
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
# Values for initial_clusters constructor argument.
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# Metric returned by evaluate(): The sum of the squared distances from each
# input point to its closest center.
SCORE = 'score'
# Keys returned by predict().
# ALL_DISTANCES: The distance from each input point to each cluster center.
# CLUSTER_INDEX: The index of the closest cluster center for each input point.
CLUSTER_INDEX = 'cluster_index'
ALL_DISTANCES = 'all_distances'
# Variable name used by cluster_centers().
CLUSTER_CENTERS_VAR_NAME = clustering_ops.CLUSTERS_VAR_NAME
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None,
feature_columns=None):
"""Creates an Estimator for running KMeans training and inference.
This Estimator implements the following variants of the K-means algorithm:
If `use_mini_batch` is False, it runs standard full batch K-means. Each
training step runs a single iteration of K-Means and must process the full
input at once. To run in this mode, the `input_fn` passed to `train` must
return the entire input dataset.
If `use_mini_batch` is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of `mini_batch_steps_per_iteration` steps. Each training step
accumulates the contribution from one mini-batch into temporary storage.
Every `mini_batch_steps_per_iteration` steps, the cluster centers are
updated and the temporary storage cleared for the next iteration. Note
that:
* If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the
standard K-means mini-batch algorithm.
* If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the
algorithm becomes an asynchronous version of the full-batch algorithm.
However, there is no guarantee by this implementation that each input
is seen exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not
behave exactly like a full-batch version.
Args:
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if `initial_clusters` is a tensor or numpy array.
model_dir: The directory to save the model results and log files.
initial_clusters: Specifies how the initial cluster centers are chosen.
One of the following:
* a tensor or numpy array with the initial cluster centers.
* a callable `f(inputs, k)` that selects and returns up to `k` centers
from an input batch. `f` is free to return any number of centers
from `0` to `k`. It will be invoked on successive input batches
as necessary until all `num_clusters` centers are chosen.
* `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input
batch. If the batch size is less than `num_clusters` then the
entire batch is chosen to be initial cluster centers and the
remaining centers are chosen from successive input batches.
* `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose
centers from the first input batch. If the batch size is less
than `num_clusters`, a TensorFlow runtime error occurs.
distance_metric: The distance metric used for clustering. One of:
* `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: Euclidean distance
between vectors `u` and `v` is defined as \\(||u - v||_2\\)
which is the square root of the sum of the absolute squares of
the elements' difference.
* `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors
`u` and `v` is defined as \\(1 - (u . v) / (||u||_2 ||v||_2)\\).
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: A boolean specifying whether to use the mini-batch k-means
algorithm. See explanation above.
mini_batch_steps_per_iteration: The number of steps after which the
updated cluster centers are synced back to a master copy. Used only if
`use_mini_batch=True`. See explanation above.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample `O(log(num_to_sample))` additional points. Used only if
`initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See `tf.estimator.Estimator`.
      feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to `tf.feature_column.input_layer`. If this
is None, all features will be used.
Raises:
ValueError: An invalid argument was passed to `initial_clusters` or
`distance_metric`.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [
KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
KMeansClustering.COSINE_DISTANCE
]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
super(KMeansClustering, self).__init__(
model_fn=_ModelFn(
num_clusters, initial_clusters, distance_metric, random_seed,
use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns).model_fn,
model_dir=model_dir,
config=config)
def _predict_one_key(self, input_fn, predict_key):
for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):
yield result[predict_key]
def predict_cluster_index(self, input_fn):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The index of the closest cluster center for each input point.
"""
for index in self._predict_one_key(input_fn,
KMeansClustering.CLUSTER_INDEX):
yield index
def score(self, input_fn):
"""Returns the sum of squared distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative sum.
Args:
input_fn: Input points. See `tf.estimator.Estimator.evaluate`. Only one
batch is retrieved.
Returns:
The sum of the squared distance from each point in the first batch of
inputs to its nearest cluster center.
"""
return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]
def transform(self, input_fn):
"""Transforms each input point to its distances to all cluster centers.
    Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,
    this function returns the squared Euclidean distance while the
    corresponding sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The distances from each input point to each cluster center.
"""
for distances in self._predict_one_key(input_fn,
KMeansClustering.ALL_DISTANCES):
yield distances
def cluster_centers(self):
"""Returns the cluster centers."""
return self.get_variable_value(KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
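# Minimal sketch (illustrative only, not part of this module's API): cluster
# points that arrive as a dict of named features rather than a single dense
# tensor, exercising _parse_features_if_necessary above. The feature names
# 'x'/'y', the point count and the number of Lloyd iterations are arbitrary.
def _kmeans_dict_features_example():
  import numpy as np
  import tensorflow as tf

  # Two scalar features per point, shaped (num_points, 1) so that they can be
  # concatenated along axis 1 into points of shape (num_points, 2).
  xs = np.random.uniform(0., 1., (200, 1)).astype(np.float32)
  ys = np.random.uniform(0., 1., (200, 1)).astype(np.float32)

  def input_fn():
    dataset = tf.data.Dataset.from_tensors({'x': tf.constant(xs),
                                            'y': tf.constant(ys)})
    return dataset.make_one_shot_iterator().get_next()

  kmeans = KMeansClustering(num_clusters=3, use_mini_batch=False)
  for _ in range(5):  # five full-batch Lloyd iterations
    kmeans.train(input_fn)
  return kmeans.cluster_centers(), kmeans.score(input_fn)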
| apache-2.0 |
alexsavio/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 157 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
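# Standalone usage sketch (not collected as a test): build a tree, query the
# three nearest neighbours of a few points and check the distances against
# the brute-force helper defined at the top of this file.
def _kd_tree_usage_sketch():
    rng = np.random.RandomState(42)
    X = rng.random_sample((50, DIMENSION))
    Y = rng.random_sample((5, DIMENSION))
    tree = KDTree(X, leaf_size=2)
    dist, ind = tree.query(Y, k=3)  # both arrays have shape (5, 3)
    dist_ref, _ = brute_force_neighbors(X, Y, 3, 'euclidean')
    assert_array_almost_equal(dist, dist_ref)
    return dist, ind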
| bsd-3-clause |
ooici/coi-services | ion/processes/data/transforms/viz/highcharts.py | 1 | 11993 |
'''
@author Raj Singh
@file ion/processes/data/transforms/viz/highcharts.py
@description Convert CDM data to Highcharts datasets used to populate the charts
'''
from pyon.core.exception import BadRequest, Timeout
from pyon.public import log
from ion.core.function.transform_function import SimpleGranuleTransformFunction
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceProcessClient
from coverage_model import ArrayType, ParameterFunctionType
import numpy as np
import ntplib
import time
from pyon.util.containers import get_ion_ts
from ion.util.time_utils import TimeUtils
from ion.core.process.transform import TransformDataProcess
class VizTransformHighCharts(TransformDataProcess):
"""
    This class is used for converting incoming data from CDM format to the JSON-style
    series used to populate HighCharts charts.
    Note: One behaviour that this class is expected to achieve specifically is to determine if it is supposed
to work as a realtime transform (exists indefinitely and maintains a sliding window of data) or as
a replay transform (one-shot).
[2] This transform behaves as an instantaneous forwarder. There is no waiting for the entire stream
to create the complete datatable. As the granules come in, they are translated to the datatable
'components'. Components, because we are not creating the actual datatable in this code. That's the job
of the viz service to put all the components of a datatable together in JSON format before sending it
to the client
[3] The time stamp in the incoming stream can't be converted to the datetime object here because
the Raw stream definition only expects regular primitives (strings, floats, ints etc)
"""
def __init__(self):
super(VizTransformHighCharts, self).__init__()
def on_start(self):
self.stream_info = self.CFG.get_safe('process.publish_streams', {})
self.stream_names = self.stream_info.keys()
self.stream_ids = self.stream_info.values()
if not self.stream_names or not self.stream_ids:
raise BadRequest('HighCharts Transform: No output streams.')
self.pubsub_management = PubsubManagementServiceProcessClient(process=self)
self.stream_def = self.pubsub_management.read_stream_definition(stream_id=self.stream_ids[0])
super(VizTransformHighCharts,self).on_start()
def recv_packet(self, packet, in_stream_route, in_stream_id):
log.info('HighCharts Transform: Received packet')
outgoing = VizTransformHighChartsAlgorithm.execute(packet, params=self.stream_def._id)
for stream_name in self.stream_names:
publisher = getattr(self, stream_name)
publisher.publish(outgoing)
class VizTransformHighChartsAlgorithm(SimpleGranuleTransformFunction):
@staticmethod
@SimpleGranuleTransformFunction.validate_inputs
def execute(input=None, context=None, config=None, params=None, state=None):
stream_definition_id = params
#init stuff
rdt_for_nones = {}
hc_data = []
normalized_ts = []
hc_allowed_numerical_types = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
'uint64', 'float32', 'float64','str']
# TODO : Move this in to container parameter
default_precision = 5
rdt = RecordDictionaryTool.load_from_granule(input)
        # Build local precision and fill-value dictionaries to use for parsing data correctly
precisions = {}
fill_values = {}
for field in rdt.fields:
precision_str = rdt.context(field).precision
if not precision_str:
precisions[field] = default_precision
else:
try:
precisions[field] = int(precision_str)
except ValueError:
precisions[field] = default_precision
fill_values[field] = rdt.fill_value(field)
        if stream_definition_id is None:
log.error("HighCharts transform: Need a output stream definition to process graphs")
return None
fields = []
fields = rdt.fields
# Ascertain temporal field. Use 'time' as backup
time_field = rdt.temporal_parameter or 'time'
#if 'time' not in rdt: return None
if rdt[time_field] is None:
return None
        time_fill_value = fill_values[time_field]  # derived from the granule's param dict.
total_num_of_records = len(rdt[time_field])
# convert timestamps from ntp to system
count = 0
normalized_ts = [None] * total_num_of_records
for ts in rdt[time_field]:
if ts == time_fill_value:
normalized_ts[count] = time_fill_value
else:
normalized_ts[count] = float(ntplib.ntp_to_system_time(ts))
count += 1
###### DEBUG ##########
#for field in fields:
# if hasattr(rdt.context(field),'visible'):
# print " >>>>>>>> '", field, "' [visible = ", rdt.context(field).visible,"] : ", rdt[field]
# else:
# print " >>>>>>>> '", field, "' [visible = NOT SPECIFIED] : ", rdt[field]
# Convert the fields in to HC series format
import re
for field in fields:
field_precision = precisions[field]
if field == time_field:
continue
# If a config block was passed, consider only the params listed in it
if config and 'parameters' in config and len(config['parameters']) > 0:
                if field not in config['parameters']:
                    log.info("Skipping %s since it was not present in the list of allowed parameters", field)
continue
# If the value is none, assign it a small one fill_value array for now to generate description,
# Actual array of fill_values will be assigned later
            rdt_field = rdt[field]
            if rdt_field is None:
rdt_for_nones[field] = np.array([fill_values[field]] * total_num_of_records)
#rdt_for_nones[field] = [fill_values[field]] * total_num_of_records
rdt_field = rdt_for_nones[field]
# Check if visibility is false (system generated params) or not specified explicitly
if hasattr(rdt.context(field),'visible') and not rdt.context(field).visible:
continue
# If it's a QC parameter ignore it
if field.endswith('_qc'):
continue
            # NOTE: The reason the following lines of code have so many branches is to pull the major decisions
            # outside the primary loops responsible for arranging data in the data structures to be sent to the charts.
            # This is better than having to make the decisions on a per-record basis.
            # Handle arrays by splitting them into individual parameters
context = rdt.context(field)
if (isinstance(context.param_type, ArrayType) or isinstance(context.param_type,ParameterFunctionType)) and len(rdt_field.shape)>1:
# Ignore any field with array dimensionality greater than 2 for now.
if len(rdt_field.shape)>2:
continue
if (rdt_field.dtype == 'string' or rdt_field.dtype not in hc_allowed_numerical_types):
for i in xrange(rdt_field.shape[1]):
series = {}
series["name"] = field + "[" + str(i) + "]"
series["visible"] = False
series["data"] = VizTransformHighChartsAlgorithm.form_series_data_str(normalized_ts, rdt_field, i, time_fill_value, fill_values[field])
hc_data.append(series)
                else: # Assume it's a float or number
for i in xrange(rdt_field.shape[1]):
series = {}
series["name"] = field + "[" + str(i) + "]"
series["visible"] = True
series["tooltip"] = {"valueDecimals":field_precision}
series["data"] = VizTransformHighChartsAlgorithm.form_series_data_num(normalized_ts, rdt_field, i, time_fill_value, fill_values[field], field_precision)
hc_data.append(series)
else:
if (rdt_field.dtype == 'string' or rdt_field.dtype not in hc_allowed_numerical_types):
series = {}
series["name"] = field
series["visible"] = False
series["data"] = VizTransformHighChartsAlgorithm.form_series_data_str(normalized_ts, rdt_field, None, time_fill_value, fill_values[field])
else:
series = {}
series["name"] = field
series["tooltip"] = {"valueDecimals":field_precision}
series["visible"] = True
series["data"] = VizTransformHighChartsAlgorithm.form_series_data_num(normalized_ts, rdt_field, None, time_fill_value, fill_values[field], field_precision)
# Append series to the hc data
hc_data.append(series)
# Prep the outgoing granule
out_rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)
#out_dict = {"hc_data": hc_data}
#out_rdt["hc_data"] = np.array([out_dict])
out_rdt["hc_data"] = [hc_data]
out_rdt["viz_timestamp"] = TimeUtils.ts_to_units(rdt.context(time_field).uom, time.time())
log.debug('HighCharts Transform: Sending a granule')
out_granule = out_rdt.to_granule()
return out_granule
@staticmethod
def form_series_data_num(timestamps, val, idx, ts_fill_value, val_fill_value, precision):
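        # Builds an object array of [timestamp_ms, value] pairs for one numeric series;
        # slots whose timestamp equals the fill value are left as None, and fill-value
        # data points are emitted as [timestamp_ms, None] so HighCharts shows a gap.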
num_of_recs = len(timestamps)
_data = np.array([None] * num_of_recs)
        if idx is not None:
for i in xrange(num_of_recs):
if timestamps[i] == ts_fill_value:
continue
                if val[i][idx] is None or val[i][idx] == val_fill_value:
_data[i] = [timestamps[i] * 1000, None]
else:
_data[i] = [timestamps[i] * 1000, round(float(val[i][idx]), precision)]
else:
for i in xrange(num_of_recs):
if timestamps[i] == ts_fill_value:
continue
                if val[i] is None or val[i] == val_fill_value:
_data[i] = [timestamps[i] * 1000, None]
else:
_data[i] = [timestamps[i] * 1000, round(float(val[i]), precision)]
return _data
@staticmethod
def form_series_data_str(timestamps, val, idx, ts_fill_value, val_fill_value):
num_of_recs = len(timestamps)
_data = np.array([None] * num_of_recs)
        if idx is not None:
for i in xrange(num_of_recs):
if timestamps[i] == ts_fill_value:
continue
if not val[i][idx] or val[i][idx] == val_fill_value:
_data[i] = [timestamps[i] * 1000, None]
else:
_data[i] = [timestamps[i] * 1000, str(val[i][idx])]
else:
for i in xrange(num_of_recs):
if timestamps[i] == ts_fill_value:
continue
if not val[i] or val[i] == val_fill_value:
_data[i] = [timestamps[i] * 1000, None]
else:
_data[i] = [timestamps[i] * 1000, str(val[i])]
return _data
| bsd-2-clause |
NVIDIA/FasterTransformer | examples/pytorch/vit/ViT-quantization/vit_int8.py | 1 | 20167 | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
import sys
from os.path import join as pjoin
import torch
import torch.nn as nn
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
import torch.nn.init as init
import torch.nn.functional as F
from scipy import ndimage
import models.configs as configs
from models.modeling_resnet import ResNetV2
logger = logging.getLogger(__name__)
ATTENTION_Q = "MultiHeadDotProductAttention_1/query"
ATTENTION_K = "MultiHeadDotProductAttention_1/key"
ATTENTION_V = "MultiHeadDotProductAttention_1/value"
ATTENTION_OUT = "MultiHeadDotProductAttention_1/out"
FC_0 = "MlpBlock_3/Dense_0"
FC_1 = "MlpBlock_3/Dense_1"
ATTENTION_NORM = "LayerNorm_0"
MLP_NORM = "LayerNorm_2"
QUANT = True
if QUANT:
from pytorch_quantization.nn import QuantLinear, TensorQuantizer
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
def bias_noact(bias, y):
return bias + y
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": torch.nn.functional.gelu,
"bias_gelu": bias_gelu,
"relu": torch.nn.functional.relu,
"swish": swish,
"bias_noact": bias_noact}
class LinearActivation(nn.Module):
r"""Fused Linear and Activation Module.
"""
__constants__ = ['bias']
def __init__(self, in_features, out_features, act='noact', bias=True, do_quant=True):
super(LinearActivation, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.act_fn = nn.Identity()
self.biased_act_fn = None
if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)):
if bias and not 'bias' in act:
act = 'bias_' + act
self.biased_act_fn = ACT2FN[act]
else:
self.act_fn = ACT2FN[act]
else:
self.act_fn = act
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.do_quant = do_quant
if QUANT and do_quant:
self._input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self._weight_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_weight)
self._aftergemm_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_normal_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
if QUANT and self.do_quant:
input = self._input_quantizer(input)
weight = self._weight_quantizer(self.weight)
else:
weight = self.weight
if not self.bias is None:
if QUANT and self.do_quant:
return self.biased_act_fn(self.bias, self._aftergemm_quantizer(F.linear(input, weight, None)))
else:
return self.biased_act_fn(self.bias, F.linear(input, weight, None))
else:
if QUANT and self.do_quant:
return self.act_fn(self._aftergemm_quantizer(F.linear(input, weight, None)))
else:
return self.act_fn(F.linear(input, weight, None))
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class Attention(nn.Module):
def __init__(self, config, vis):
super(Attention, self).__init__()
self.vis = vis
self.num_attention_heads = config.transformer["num_heads"]
self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = LinearActivation(config.hidden_size, self.all_head_size)
self.key = LinearActivation(config.hidden_size, self.all_head_size)
self.value = LinearActivation(config.hidden_size, self.all_head_size)
self.out = LinearActivation(config.hidden_size, config.hidden_size)
self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.softmax = Softmax(dim=-1)
if QUANT:
self.matmul_q_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_k_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_v_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.matmul_a_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.softmax_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def transpose_for_scores(self, x):
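        # Reshape (batch, seq_len, hidden) -> (batch, num_heads, seq_len, head_size)
        # so that attention scores can be computed per head.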
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
if QUANT:
attention_scores = torch.matmul(self.matmul_q_input_quantizer(query_layer),
self.matmul_k_input_quantizer(key_layer.transpose(-1, -2)))
attention_scores = self.softmax_input_quantizer(attention_scores)
else:
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = self.softmax(attention_scores)
weights = attention_probs if self.vis else None
attention_probs = self.attn_dropout(attention_probs)
if QUANT:
context_layer = torch.matmul(self.matmul_a_input_quantizer(attention_probs),
self.matmul_v_input_quantizer(value_layer))
else:
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
attention_output = self.out(context_layer)
attention_output = self.proj_dropout(attention_output)
return attention_output, weights
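# Note (assumption about the intended workflow): when QUANT is enabled, the TensorQuantizer
# modules above insert fake-quantization around the Q/K/V and projection GEMMs, the softmax
# input and the attention*V product, so that INT8 scales can be calibrated with
# pytorch_quantization before the model is exported to INT8 inference kernels.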
class Mlp(nn.Module):
def __init__(self, config):
super(Mlp, self).__init__()
self.fc1 = LinearActivation(config.hidden_size, config.transformer["mlp_dim"], act='noact')
self.fc2 = LinearActivation(config.transformer["mlp_dim"], config.hidden_size, act='noact')
self.act_fn = ACT2FN["gelu"]
self.dropout = Dropout(config.transformer["dropout_rate"])
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.normal_(self.fc1.bias, std=1e-6)
nn.init.normal_(self.fc2.bias, std=1e-6)
def forward(self, x):
x = self.fc1(x)
x = self.act_fn(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Embeddings(nn.Module):
"""Construct the embeddings from patch, position embeddings.
"""
def __init__(self, config, img_size, in_channels=3):
super(Embeddings, self).__init__()
self.hybrid = None
img_size = _pair(img_size)
if config.patches.get("grid") is not None:
grid_size = config.patches["grid"]
patch_size = (img_size[0] // 16 // grid_size[0], img_size[1] // 16 // grid_size[1])
n_patches = (img_size[0] // 16) * (img_size[1] // 16)
self.hybrid = True
else:
patch_size = _pair(config.patches["size"])
n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1])
self.hybrid = False
if self.hybrid:
self.hybrid_model = ResNetV2(block_units=config.resnet.num_layers,
width_factor=config.resnet.width_factor)
in_channels = self.hybrid_model.width * 16
self.patch_embeddings = Conv2d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=patch_size)
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches+1, config.hidden_size))
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.dropout = Dropout(config.transformer["dropout_rate"])
def forward(self, x):
B = x.shape[0]
cls_tokens = self.cls_token.expand(B, -1, -1)
if self.hybrid:
x = self.hybrid_model(x)
x = self.patch_embeddings(x)
x = x.flatten(2)
x = x.transpose(-1, -2)
x = torch.cat((cls_tokens, x), dim=1)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
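# Embeddings output shape: (batch, n_patches + 1, hidden_size) -- a learned [CLS] token is
# prepended to the patch embeddings and learned position embeddings are added.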
class Block(nn.Module):
def __init__(self, config, vis):
super(Block, self).__init__()
self.hidden_size = config.hidden_size
self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn = Mlp(config)
self.attn = Attention(config, vis)
if QUANT:
self.layernorm_input1_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add1_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.layernorm_input2_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_local_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
self.add2_residual_input_quantizer = TensorQuantizer(QuantLinear.default_quant_desc_input)
def forward(self, x):
h = x
if QUANT:
x = self.attention_norm(self.layernorm_input1_quantizer(x))
else:
x = self.attention_norm(x)
x, weights = self.attn(x)
if QUANT:
x = self.add1_local_input_quantizer(x) + self.add1_residual_input_quantizer(h)
else:
x = x + h
h = x
if QUANT:
x = self.ffn_norm(self.layernorm_input2_quantizer(x))
else:
x = self.ffn_norm(x)
x = self.ffn(x)
# print('adding bias', x[0,0,:8])
if QUANT:
x = self.add2_local_input_quantizer(x) + self.add2_residual_input_quantizer(h)
else:
x = x + h
# print('residual:', h[0,0,:8])
# print('adding bias+res', x[0,0,:8])
return x, weights
def load_from(self, weights, n_block):
ROOT = f"Transformer/encoderblock_{n_block}"
with torch.no_grad():
query_weight = np2th(weights[pjoin(ROOT, ATTENTION_Q, "kernel")]).view(self.hidden_size, self.hidden_size).t()
key_weight = np2th(weights[pjoin(ROOT, ATTENTION_K, "kernel")]).view(self.hidden_size, self.hidden_size).t()
value_weight = np2th(weights[pjoin(ROOT, ATTENTION_V, "kernel")]).view(self.hidden_size, self.hidden_size).t()
out_weight = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "kernel")]).view(self.hidden_size, self.hidden_size).t()
query_bias = np2th(weights[pjoin(ROOT, ATTENTION_Q, "bias")]).view(-1)
key_bias = np2th(weights[pjoin(ROOT, ATTENTION_K, "bias")]).view(-1)
value_bias = np2th(weights[pjoin(ROOT, ATTENTION_V, "bias")]).view(-1)
out_bias = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "bias")]).view(-1)
self.attn.query.weight.copy_(query_weight)
self.attn.key.weight.copy_(key_weight)
self.attn.value.weight.copy_(value_weight)
self.attn.out.weight.copy_(out_weight)
self.attn.query.bias.copy_(query_bias)
self.attn.key.bias.copy_(key_bias)
self.attn.value.bias.copy_(value_bias)
self.attn.out.bias.copy_(out_bias)
mlp_weight_0 = np2th(weights[pjoin(ROOT, FC_0, "kernel")]).t()
mlp_weight_1 = np2th(weights[pjoin(ROOT, FC_1, "kernel")]).t()
mlp_bias_0 = np2th(weights[pjoin(ROOT, FC_0, "bias")]).t()
mlp_bias_1 = np2th(weights[pjoin(ROOT, FC_1, "bias")]).t()
self.ffn.fc1.weight.copy_(mlp_weight_0)
self.ffn.fc2.weight.copy_(mlp_weight_1)
self.ffn.fc1.bias.copy_(mlp_bias_0)
self.ffn.fc2.bias.copy_(mlp_bias_1)
self.attention_norm.weight.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "scale")]))
self.attention_norm.bias.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "bias")]))
self.ffn_norm.weight.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "scale")]))
self.ffn_norm.bias.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "bias")]))
class Encoder(nn.Module):
def __init__(self, config, vis):
super(Encoder, self).__init__()
self.vis = vis
self.layer = nn.ModuleList()
self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
for _ in range(config.transformer["num_layers"]):
layer = Block(config, vis)
self.layer.append(copy.deepcopy(layer))
def forward(self, hidden_states):
attn_weights = []
for idx, layer_block in enumerate(self.layer):
hidden_states, weights = layer_block(hidden_states)
if self.vis:
attn_weights.append(weights)
encoded = self.encoder_norm(hidden_states)
return encoded, attn_weights
class Transformer(nn.Module):
def __init__(self, config, img_size, vis):
super(Transformer, self).__init__()
self.embeddings = Embeddings(config, img_size=img_size)
self.encoder = Encoder(config, vis)
def forward(self, input_ids):
embedding_output = self.embeddings(input_ids)
encoded, attn_weights = self.encoder(embedding_output)
return encoded, attn_weights
class VisionTransformerINT8(nn.Module):
def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
super(VisionTransformerINT8, self).__init__()
self.num_classes = num_classes
self.zero_head = zero_head
self.classifier = config.classifier
self.transformer = Transformer(config, img_size, vis)
self.head = Linear(config.hidden_size, num_classes)
def forward(self, x, labels=None):
x, attn_weights = self.transformer(x)
logits = self.head(x[:, 0])
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_classes), labels.view(-1))
return logits, loss
else:
return logits, attn_weights
def load_from(self, weights):
with torch.no_grad():
if self.zero_head:
nn.init.zeros_(self.head.weight)
nn.init.zeros_(self.head.bias)
else:
self.head.weight.copy_(np2th(weights["head/kernel"]).t())
self.head.bias.copy_(np2th(weights["head/bias"]).t())
self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True))
self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"]))
self.transformer.embeddings.cls_token.copy_(np2th(weights["cls"]))
self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"]))
self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"]))
posemb = np2th(weights["Transformer/posembed_input/pos_embedding"])
posemb_new = self.transformer.embeddings.position_embeddings
if posemb.size() == posemb_new.size():
self.transformer.embeddings.position_embeddings.copy_(posemb)
else:
logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size()))
ntok_new = posemb_new.size(1)
if self.classifier == "token":
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))
for bname, block in self.transformer.encoder.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, n_block=uname)
if self.transformer.embeddings.hybrid:
self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights["conv_root/kernel"], conv=True))
gn_weight = np2th(weights["gn_root/scale"]).view(-1)
gn_bias = np2th(weights["gn_root/bias"]).view(-1)
self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)
self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)
for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, n_block=bname, n_unit=uname)
CONFIGS = {
'ViT-B_16': configs.get_b16_config(),
'ViT-B_32': configs.get_b32_config(),
'ViT-L_16': configs.get_l16_config(),
'ViT-L_32': configs.get_l32_config(),
'ViT-H_14': configs.get_h14_config(),
'R50-ViT-B_16': configs.get_r50_b16_config(),
'testing': configs.get_testing(),
}
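# Example usage (hypothetical checkpoint path and sizes):
#   config = CONFIGS['ViT-B_16']
#   model = VisionTransformerINT8(config, img_size=384, num_classes=1000)
#   model.load_from(np.load('ViT-B_16.npz'))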
| apache-2.0 |
snnn/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 39 | 6589 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (
tf.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
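# Example usage (hypothetical, TF 1.x graph mode):
#   train, test = dataset()
#   features, label = train.shuffle(1000).batch(64).make_one_shot_iterator().get_next()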
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
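# Example usage (hypothetical):
#   (x_train, y_train), (x_test, y_test) = load_data(seed=0)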
| apache-2.0 |
warrenspe/Tokex | test/test_tokenizer.py | 1 | 2594 | import re
from tokex import tokenizers
import _test_case
class TestTokenizer(_test_case.TokexTestCase):
input_string = """
separate words which should be tokenized _separate_ly_
(words) [to] {be} <tokenized> #separately# $from$ ;their; |surrounding| !characters!
\n \t\n\n
== <= >= != ... !=...<=
"""
def test_tokenizer(self):
tokenizer = tokenizers.TokexTokenizer()
self.assertEqual(tokenizer.tokenize(self.input_string), [
"separate", "words", "which", "should", "be", "tokenized", "_separate_ly_",
"(", "words", ")", "[", "to", "]", "{", "be", "}", "<", "tokenized", ">", "#", "separately", "#",
"$", "from", "$", ";", "their", ";", "|", "surrounding", "|", "!", "characters", "!",
"==", "<=", ">=", "!=", "...", "!=...<="
])
def test_tokenize_newlines(self):
tokenizer = tokenizers.TokexTokenizer(tokenize_newlines=True)
self.assertEqual(tokenizer.tokenize(self.input_string), [
"\n", "separate", "words", "which", "should", "be", "tokenized", "_separate_ly_", "\n",
"(", "words", ")", "[", "to", "]", "{", "be", "}", "<", "tokenized", ">", "#", "separately", "#",
"$", "from", "$", ";", "their", ";", "|", "surrounding", "|", "!", "characters", "!", "\n", "\n", "\n", "\n", "\n",
"==", "<=", ">=", "!=", "...", "!=...<=", "\n"
])
tokenizer = tokenizers.TokexTokenizer(tokenize_newlines=True, ignore_empty_lines=True)
self.assertEqual(tokenizer.tokenize(self.input_string), [
"separate", "words", "which", "should", "be", "tokenized", "_separate_ly_", "\n",
"(", "words", ")", "[", "to", "]", "{", "be", "}", "<", "tokenized", ">", "#", "separately", "#",
"$", "from", "$", ";", "their", ";", "|", "surrounding", "|", "!", "characters", "!", "\n",
"==", "<=", ">=", "!=", "...", "!=...<=", "\n"
])
class TestNumericTokenizer(_test_case.TokexTestCase):
input_string = r"""
separate words which should be tokenized _separate_ly_
$1230.1230 123. 12430$ 123,123 1234 123.123
== <= >= != ... !=...<=
"""
def test_numeric_tokenizer(self):
tokenizer = tokenizers.NumericTokenizer()
self.assertEqual(tokenizer.tokenize(self.input_string), [
"separate", "words", "which", "should", "be", "tokenized", "_separate_ly_",
"$1230.1230", "123.", "12430$", "123,123", "1234", "123.123",
"==", "<=", ">=", "!=", "...", "!=...<="
])
| gpl-3.0 |
lucidfrontier45/scikit-learn | examples/cluster/plot_dbscan.py | 5 | 2629 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print __doc__
import numpy as np
from scipy.spatial import distance
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4)
##############################################################################
# Compute similarities
D = distance.squareform(distance.pdist(X))
S = 1 - (D / np.max(D))
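# NOTE (assumption about this older scikit-learn API): DBSCAN is fit on the similarity
# matrix S directly, so eps=0.95 below acts as a similarity threshold rather than a distance.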
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.95, min_samples=10).fit(S)
core_samples = db.core_sample_indices_
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)
print "Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)
print "V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)
print "Adjusted Rand Index: %0.3f" % \
metrics.adjusted_rand_score(labels_true, labels)
print "Adjusted Mutual Information: %0.3f" % \
metrics.adjusted_mutual_info_score(labels_true, labels)
print ("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(D, labels, metric='precomputed'))
##############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.close('all')
pl.figure(1)
pl.clf()
# Black removed and is used for noise instead.
colors = cycle('bgrcmybgrcmybgrcmybgrcmy')
for k, col in zip(set(labels), colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1:
markersize = 14
else:
markersize = 6
pl.plot(x[0], x[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=markersize)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| bsd-3-clause |
parksandwildlife/biosys | biosys/apps/main/api/urls.py | 2 | 2676 | from __future__ import absolute_import, unicode_literals, print_function, division
from django.conf.urls import url
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
from djoser import views as djoser_views
from main.api import views as api_views
router = routers.DefaultRouter()
router.register(r'users?', api_views.UserViewSet, 'user')
router.register(r'programs?', api_views.ProgramViewSet, 'program')
router.register(r'projects?', api_views.ProjectViewSet, 'project')
router.register(r'sites?', api_views.SiteViewSet, 'site')
router.register(r'datasets?', api_views.DatasetViewSet, 'dataset')
router.register(r'records?', api_views.RecordViewSet, 'record')
router.register(r'media', api_views.MediaViewSet, 'media')
router.register(r'project-media', api_views.ProjectMediaViewSet, 'project-media')
router.register(r'dataset-media', api_views.DatasetMediaViewSet, 'dataset-media')
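# The DefaultRouter above also generates the standard DRF list/detail routes for each
# registered viewset, e.g. (hypothetical): GET /projects/ -> ProjectViewSet.list and
# GET /projects/<pk>/ -> ProjectViewSet.retrieve.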
url_patterns = [
url(r'auth-token/', obtain_auth_token, name="auth-token"),
url(r'projects?/(?P<pk>\d+)/sites/?', api_views.ProjectSitesView.as_view(), name='project-sites'), # bulk sites
url(r'projects?/(?P<pk>\d+)/upload-sites/?', api_views.ProjectSitesUploadView.as_view(),
name='upload-sites'), # file upload for sites
url(r'datasets?/(?P<pk>\d+)/records/?', api_views.DatasetRecordsView.as_view(), name='dataset-records'),
# upload data files
url(r'datasets?/(?P<pk>\d+)/upload-records/?', api_views.DatasetUploadRecordsView.as_view(),
name='dataset-upload'),
url(r'statistics/?', api_views.StatisticsView.as_view(), name="statistics"),
url(r'whoami/?', api_views.WhoamiView.as_view(), name="whoami"),
url(r'species/?', api_views.SpeciesView.as_view(), name="species"),
url(r'logout/?', api_views.LogoutView.as_view(), name="logout"),
# utils
url(r'utils/geometry-to-data/dataset/(?P<pk>\d+)/?',
api_views.GeoConvertView.as_view(output='data'),
name="geometry-to-data"
),
url(r'utils/data-to-geometry/dataset/(?P<pk>\d+)/?',
api_views.GeoConvertView.as_view(output='geometry'),
name="data-to-geometry"
),
url(r'utils/infer-dataset/?', api_views.InferDatasetView.as_view(), name='infer-dataset'),
url(r'^password/?$', djoser_views.SetPasswordView.as_view(), name='set-password'),
url(
r'^password/reset/?$',
djoser_views.PasswordResetView.as_view(),
name='password-reset'
),
url(
r'^password/reset/confirm/?$',
djoser_views.PasswordResetConfirmView.as_view(),
name='password-reset-confirm'
),
]
app_name = 'api'
urls = router.urls + url_patterns
| apache-2.0 |
alexeyum/scikit-learn | sklearn/covariance/robust_covariance.py | 103 | 29653 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
    location : array-like, shape (n_features,)
        Robust location estimates.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.
    det : float
        Log-determinant of the robust covariance estimate, as computed by
        `fast_logdet`.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Mahalanobis distances of all observations from the robust location
        estimate.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
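# Example usage of c_step (hypothetical data):
#   rng = np.random.RandomState(42)
#   X = rng.randn(100, 3)
#   location, covariance, det, support, dist = c_step(X, n_support=60,
#                                                     random_state=42)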
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into a larger subset, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        Mahalanobis distances of the observations from the raw robust
        location estimate.
    """
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal)
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
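# Example usage of fast_mcd (hypothetical data):
#   rng = np.random.RandomState(0)
#   X = rng.randn(500, 5)
#   location, covariance, support, dist = fast_mcd(X, random_state=0)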
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
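# Editorial sketch (not part of the original module): a minimal example of
# how the estimator defined above is typically used. The sample size, the
# 7.0 shift applied to a few rows, and the fixed random seeds are arbitrary
# illustration values.
def _min_cov_det_usage_sketch():
    import numpy as np
    from sklearn.covariance import MinCovDet
    rng = np.random.RandomState(0)
    # 200 points from a correlated Gaussian, with the first 10 shifted far
    # away so that they behave as outliers.
    X = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.3], [0.3, 1.0]], size=200)
    X[:10] += 7.0
    mcd = MinCovDet(random_state=0).fit(X)
    return {
        "location": mcd.location_,             # robust location estimate
        "covariance": mcd.covariance_,         # re-weighted robust covariance
        "n_inliers": int(mcd.support_.sum()),  # observations kept as inliers
    }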
| bsd-3-clause |
lucidfrontier45/scikit-learn | benchmarks/bench_glmnet.py | 6 | 3778 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
In the first benchmark, we fix the number of features and increase the
number of training samples, then plot the computation time as a function
of the number of samples.
In the second benchmark, we fix the number of samples and increase the
number of dimensions of the training set, then plot the computation time
as a function of the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print "duration: %0.3fs" % delta
print "rmse: %f" % rmse(Y_test, clf.predict(X_test))
print "mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean()
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print '=================='
print 'Iteration %s of %s' % (i, n)
print '=================='
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print "benching scikit: "
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print "benching glmnet: "
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('time (in seconds)')
pl.show()
# now do a bench where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print '=================='
print 'Iteration %02d of %02d' % (i, n)
print '=================='
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print "benching scikit: "
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print "benching glmnet: "
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure()
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('time (in seconds)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/neighbors/base.py | 16 | 30646 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
----------
dist : ndarray
The input distances
weights : {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
-------
weights_arr : array of the same shape as ``dist``
If ``weights == 'uniform'``, None is returned instead.
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
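# Editorial sketch (not part of the original module): how the 'auto' dispatch
# implemented in ``NeighborsBase._fit`` above resolves in practice. The
# ``_fit_method`` attribute is internal and is inspected here purely for
# illustration; the data shape and seed are arbitrary.
def _algorithm_selection_sketch():
    import numpy as np
    from scipy.sparse import csr_matrix
    from sklearn.neighbors import NearestNeighbors
    X = np.random.RandomState(0).rand(50, 3)
    dense_nn = NearestNeighbors(n_neighbors=3, algorithm='auto').fit(X)
    sparse_nn = NearestNeighbors(n_neighbors=3, algorithm='auto').fit(
        csr_matrix(X))
    # Dense Euclidean data selects a tree; sparse input falls back to brute
    # force, as coded in _fit above.
    return dense_nn._fit_method, sparse_nn._fit_method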
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: when there are more duplicates than
# requested neighbors, the first nearest neighbor will not
# be the sample itself, but one of its duplicates.
# In that case, mask the first duplicate instead.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of the edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=self.n_jobs, squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
n_jobs=self.n_jobs,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of the edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
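# Editorial sketch (not part of the original module): the CSR bookkeeping
# performed by ``radius_neighbors_graph`` above, replayed on a hand-written,
# variable-length neighbor list. ``indptr`` is simply the running count of
# neighbors per query row.
def _radius_graph_csr_sketch():
    import numpy as np
    from scipy.sparse import csr_matrix
    neighbor_ind = [np.array([0, 2]), np.array([1]), np.array([0, 1, 2])]
    n_neighbors = np.array([len(a) for a in neighbor_ind])
    indices = np.concatenate(neighbor_ind)
    data = np.ones(len(indices))            # 'connectivity' mode weights
    indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
    return csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()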
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
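# Editorial sketch (not part of the original module): the per-output label
# encoding used by ``SupervisedIntegerMixin.fit`` above, on a tiny
# two-output target array.
def _label_encoding_sketch():
    import numpy as np
    y = np.array([['cat', 'small'],
                  ['dog', 'small'],
                  ['cat', 'large']])
    classes, encoded = [], np.empty(y.shape, dtype=int)
    for k in range(y.shape[1]):
        classes_k, encoded[:, k] = np.unique(y[:, k], return_inverse=True)
        classes.append(classes_k)
    # classes == [array(['cat', 'dog']), array(['large', 'small'])]
    # encoded == [[0, 1], [1, 1], [0, 0]]
    return classes, encoded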
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/neural_network/tests/test_stochastic_optimizers.py | 140 | 4310 | import numpy as np
from sklearn.neural_network._stochastic_optimizers import (BaseOptimizer,
SGDOptimizer,
AdamOptimizer)
from sklearn.utils.testing import (assert_array_equal, assert_true,
assert_false, assert_equal)
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = BaseOptimizer(params, lr)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [np.random.random(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive')
assert_false(optimizer.trigger_stopping('', False))
assert_equal(lr / 5, optimizer.learning_rate)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
updates = [momentum * update - lr * grad
for update, grad in zip(updates, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [np.random.random(shape) for shape in shapes]
vs = [np.random.random(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [np.random.random(shape) for shape in shapes]
ms = [beta_1 * m + (1 - beta_1) * grad
for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad ** 2)
for v, grad in zip(vs, grads)]
learning_rate = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1**t)
updates = [-learning_rate * m / (np.sqrt(v) + epsilon)
for m, v in zip(ms, vs)]
expected = [param + update
for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
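# Editorial sketch (not part of the original tests): a single Adam update for
# one parameter array, written out with the same formulas the test above
# checks AdamOptimizer against. The hyper-parameter values are the usual
# defaults and are arbitrary for this illustration.
def _adam_single_step_sketch():
    params = np.zeros((4, 6))
    grad = np.full((4, 6), 0.5)
    m = np.zeros_like(params)   # first-moment (mean) estimate
    v = np.zeros_like(params)   # second-moment (uncentered variance) estimate
    lr, beta_1, beta_2, epsilon, t = 0.001, 0.9, 0.999, 1e-8, 1
    m = beta_1 * m + (1 - beta_1) * grad
    v = beta_2 * v + (1 - beta_2) * grad ** 2
    lr_t = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
    return params - lr_t * m / (np.sqrt(v) + epsilon)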
| bsd-3-clause |
hale36/SRTV | lib/guessit/transfo/guess_idnumber.py | 33 | 2761 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
_DIGIT = 0
_LETTER = 1
_OTHER = 2
class GuessIdnumber(Transformer):
def __init__(self):
Transformer.__init__(self, 220)
def supported_properties(self):
return ['idNumber']
_idnum = re.compile(r'(?P<idNumber>[a-zA-Z0-9-]{20,})') # 1.0, (0, 0))
def guess_idnumber(self, string, node=None, options=None):
match = self._idnum.search(string)
if match is not None:
result = match.groupdict()
switch_count = 0
switch_letter_count = 0
letter_count = 0
last_letter = None
last = _LETTER
for c in result['idNumber']:
if c in '0123456789':
ci = _DIGIT
elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
ci = _LETTER
if c != last_letter:
switch_letter_count += 1
last_letter = c
letter_count += 1
else:
ci = _OTHER
if ci != last:
switch_count += 1
last = ci
switch_ratio = float(switch_count) / len(result['idNumber'])
letters_ratio = (float(switch_letter_count) / letter_count) if letter_count > 0 else 1
# only return the result as probable if we alternate often between
# char type (more likely for hash values than for common words)
if switch_ratio > 0.4 and letters_ratio > 0.4:
return result, match.span()
return None, None
def process(self, mtree, options=None):
GuessFinder(self.guess_idnumber, 0.4, self.log, options).process_nodes(mtree.unidentified_leaves())
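# Editorial sketch (not part of the original plugin): a simplified,
# standalone re-implementation of the character-class switch heuristic used
# by guess_idnumber above (it uses str.isdigit/str.isalpha instead of the
# explicit ASCII tables, which is close enough for illustration).
def _switch_ratio_sketch(candidate):
    switch_count = switch_letter_count = letter_count = 0
    last_letter = None
    last = _LETTER
    for c in candidate:
        if c.isdigit():
            ci = _DIGIT
        elif c.isalpha():
            ci = _LETTER
            if c != last_letter:
                switch_letter_count += 1
            last_letter = c
            letter_count += 1
        else:
            ci = _OTHER
        if ci != last:
            switch_count += 1
        last = ci
    switch_ratio = float(switch_count) / len(candidate)
    letters_ratio = (float(switch_letter_count) / letter_count
                     if letter_count > 0 else 1)
    return switch_ratio > 0.4 and letters_ratio > 0.4
# A hash-like token such as '1a2b3c4d5e6f7a8b9c0d' alternates classes often
# and passes both thresholds, while a plain word such as
# 'informationretrieval' never switches class and is rejected.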
| gpl-3.0 |
ARudiuk/mne-python | examples/stats/plot_cluster_stats_evoked.py | 21 | 3000 | """
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=1)
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| bsd-3-clause |
alexsavio/scikit-learn | examples/missing_values.py | 70 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/contrib/timeseries/python/timeseries/input_pipeline.py | 37 | 40992 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines ways of splicing and re-arranging time series.
This file provides methods for reading, parsing, and re-arranging a time
series. The main departure from standard TensorFlow input pipelines is a focus
on "chunking" a time series, i.e. slicing it into small contiguous windows which
are then batched together for training, a form of truncated
backpropagation. This typically provides a significant speedup compared to
looping over the whole series sequentially, by exploiting data parallelism and
by reducing redundant contributions to gradients (due to redundant information
in the series itself).
A series, consisting of times (an increasing vector of integers) and values (one
or more floating point values for each time) along with any exogenous features,
is stored either in memory or on disk in various formats (e.g. "one record per
timestep" on disk, or as a dictionary of Numpy arrays in memory). The location
and format is specified by configuring a `TimeSeriesReader` object
(e.g. `NumpyReader`, `CSVReader`), which reads the data into the TensorFlow
graph. A `TimeSeriesInputFn` object (typically `RandomWindowInputFn`) then
performs windowing and batching.
Time series are passed through this pipeline as dictionaries mapping feature
names to their values. For training and evaluation, these require at minimum
`TrainEvalFeatures.TIMES` (scalar integers, one per timestep) and
`TrainEvalFeatures.VALUES` (may be either univariate or multivariate). Exogenous
features may have any shape, but are likewise associated with a timestep. Times
themselves need not be contiguous or regular (although smaller/fewer gaps are
generally better), but each timestep must have all `VALUES` and any exogenous
features (i.e. times may be missing, but given that a time is specified, every
other feature must also be specified for that step; some models may support
making exogenous updates conditional).
The expected use case of a `TimeSeriesInputFn` is that it is first configured
(for example setting a batch or window size) and passed a reader (a
`TimeSeriesReader` object). The `TimeSeriesInputFn` can then be passed as the
input_fn of an Estimator.
For example, `RandomWindowInputFn` is useful for creating batches of random
chunks of a series for training:
```
# Read data in the default "time,value" CSV format with no header
reader = input_pipeline.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = input_pipeline.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=150)
```
`RandomWindowInputFn` is the primary tool for training and quantitative
evaluation of time series. `WholeDatasetInputFn`, which reads a whole series
into memory, is useful for qualitative evaluation and preparing to make
predictions with `predict_continuation_input_fn`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import training
from tensorflow.python.util import nest
def predict_continuation_input_fn(
evaluation, steps=None, times=None, exogenous_features=None):
"""An Estimator input_fn for running predict() after evaluate().
If the call to evaluate() we are making predictions based on had a batch_size
greater than one, predictions will start after each of these windows
(i.e. will have the same batch dimension).
Args:
evaluation: The dictionary returned by `Estimator.evaluate`, with keys
FilteringResults.STATE_TUPLE and FilteringResults.TIMES.
steps: The number of steps to predict (scalar), starting after the
evaluation. If `times` is specified, `steps` must not be; one is required.
times: A [batch_size x window_size] array of integers (not a Tensor)
indicating times to make predictions for. These times must be after the
corresponding evaluation. If `steps` is specified, `times` must not be;
one is required. If the batch dimension is omitted, it is assumed to be 1.
exogenous_features: Optional dictionary. If specified, indicates exogenous
features for the model to use while making the predictions. Values must
have shape [batch_size x window_size x ...], where `batch_size` matches
the batch dimension used when creating `evaluation`, and `window_size` is
either the `steps` argument or the `window_size` of the `times` argument
(depending on which was specified).
Returns:
An `input_fn` suitable for passing to the `predict` function of a time
series `Estimator`.
Raises:
ValueError: If `times` or `steps` are misspecified.
"""
if exogenous_features is None:
exogenous_features = {}
predict_times = model_utils.canonicalize_times_or_steps_from_output(
times=times, steps=steps, previous_model_output=evaluation)
features = {
feature_keys.PredictionFeatures.STATE_TUPLE:
evaluation[feature_keys.FilteringResults.STATE_TUPLE],
feature_keys.PredictionFeatures.TIMES:
predict_times
}
features.update(exogenous_features)
def _predict_input_fn():
"""An input_fn for predict()."""
# Prevents infinite iteration with a constant output in an Estimator's
# predict().
limited_features = {}
for key, values in features.items():
limited_values = nest.map_structure(
lambda value: training.limit_epochs(value, num_epochs=1), values)
limited_features[key] = limited_values
return (limited_features, None)
return _predict_input_fn
class TimeSeriesReader(object):
"""Reads from and parses a data source for a `TimeSeriesInputFn`.
This class provides methods that read a few records (`read`) or the full data
set at once (`read_full`), and returns them as dictionaries mapping feature
names to feature Tensors. Please see note at the top of the file for the
structure of these dictionaries. The output is generally chunked by a
`TimeSeriesInputFn` before being passed to the model.
"""
def check_dataset_size(self, minimum_dataset_size):
"""When possible, raises an error if the dataset is too small.
This method allows TimeSeriesReaders to raise informative error messages if
the user has selected a window size in their TimeSeriesInputFn which is
larger than the dataset size. However, many TimeSeriesReaders will not have
access to a dataset size, in which case they do not need to override this
method.
Args:
minimum_dataset_size: The minimum number of records which should be
contained in the dataset. Readers should attempt to raise an error when
possible if an epoch of data contains fewer records.
"""
pass
@abc.abstractmethod
def read(self):
"""Parses one or more records into a feature dictionary.
This method is expected to be called by a `TimeSeriesInputFn` object, and is
not for use with models directly.
A `TimeSeriesReader` object reads multiple records at a single time for
efficiency; the size of these batches is an implementation detail internal
to the input pipeline. These records should generally be sequential,
although some out-of-order records due to file wraparounds are expected and
must be handled by callers.
Returns:
A dictionary mapping feature names to `Tensor` values, each with an
arbitrary batch dimension (for efficiency) as their first dimension.
"""
pass
@abc.abstractmethod
def read_full(self):
"""Return the full dataset.
Largely for interactive use/plotting (or evaluation on small
datasets). Generally not very efficient. Not recommended for training.
Returns:
Same return type as `read`, but with the full dataset rather than an
arbitrary chunk of it. A dictionary mapping feature names to `Tensor`
values, where the size of the first dimension of each `Tensor` is the
number of samples in the entire dataset. These `Tensor`s should be
constant across graph invocations, assuming that the underlying data
remains constant. Current implementations re-read data on each graph
invocation, although this may change in the future.
"""
pass
class NumpyReader(TimeSeriesReader):
"""A time series parser for feeding Numpy arrays to a `TimeSeriesInputFn`.
Avoids embedding data in the graph as constants.
"""
def __init__(self, data, read_num_records_hint=4096):
"""Numpy array input for a `TimeSeriesInputFn`.
Args:
data: A dictionary mapping feature names to Numpy arrays, with two
possible shapes (requires keys `TrainEvalFeatures.TIMES` and
`TrainEvalFeatures.VALUES`):
Univariate; `TIMES` and `VALUES` are both vectors of shape [series
length]
Multivariate; `TIMES` is a vector of shape [series length], `VALUES`
has shape [series length x number of features].
In any case, `VALUES` and any exogenous features must have their shapes
prefixed by the shape of the value corresponding to the `TIMES` key.
read_num_records_hint: The maximum number of samples to read at one time,
for efficiency.
"""
self._features = _canonicalize_numpy_data(
data, require_single_batch=True)
self._read_num_records_hint = read_num_records_hint
def check_dataset_size(self, minimum_dataset_size):
"""Raise an error if the dataset is too small."""
dataset_size = self._features[feature_keys.TrainEvalFeatures.TIMES].shape[1]
if dataset_size < minimum_dataset_size:
raise ValueError(
("A TimeSeriesInputFn is configured to create windows of size {}, "
"but only {} records were available in the dataset. Either decrease "
"the window size or provide more records.").format(
minimum_dataset_size, dataset_size))
def read(self):
"""Returns a large chunk of the Numpy arrays for later re-chunking."""
# Remove the batch dimension from all features
features = {key: numpy.squeeze(value, axis=0)
for key, value in self._features.items()}
return estimator_lib.inputs.numpy_input_fn(
x=features,
# The first dimensions of features are the series length, since we have
# removed the batch dimension above. We now pull out
# self._read_num_records_hint steps of this single time series to pass
# to the TimeSeriesInputFn.
batch_size=self._read_num_records_hint,
num_epochs=None,
shuffle=False)()
def read_full(self):
"""Returns `Tensor` versions of the full Numpy arrays."""
features = estimator_lib.inputs.numpy_input_fn(
x=self._features,
batch_size=1,
num_epochs=None,
queue_capacity=2, # Each queue element is a full copy of the dataset
shuffle=False)()
# TimeSeriesInputFn expect just a batch dimension
return {feature_name: array_ops.squeeze(feature_value, axis=0)
for feature_name, feature_value in features.items()}
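# Editorial sketch (not part of the original module): the dictionary layout
# that ``NumpyReader`` above expects for a univariate series -- integer TIMES
# and float VALUES of equal length. The 200-step sine wave is an arbitrary
# illustration series.
def _numpy_reader_sketch():
    times = numpy.arange(200, dtype=numpy.int64)
    values = numpy.sin(times / 10.0).astype(numpy.float32)
    data = {
        feature_keys.TrainEvalFeatures.TIMES: times,
        feature_keys.TrainEvalFeatures.VALUES: values,
    }
    return NumpyReader(data)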
class ReaderBaseTimeSeriesParser(TimeSeriesReader):
"""Base for time series readers which wrap a `tf.ReaderBase`."""
def __init__(self, filenames, read_num_records_hint=4096):
"""Configure the time series reader.
Args:
filenames: A string or list of strings indicating files to read records
from.
read_num_records_hint: When not reading a full dataset, indicates the
number of records to transfer in a single chunk (for efficiency). The
actual number transferred at one time may vary.
"""
self._filenames = filenames
self._read_num_records_hint = read_num_records_hint
@abc.abstractmethod
def _get_reader(self):
"""Get an instance of the tf.ReaderBase associated with this class."""
pass
@abc.abstractmethod
def _process_records(self, lines):
"""Given string items, return a processed dictionary of Tensors.
Args:
lines: A 1-dimensional string Tensor, each representing a record to parse
(source dependent, e.g. a line of a file, or a serialized protocol
buffer).
Returns:
A dictionary mapping feature names to their values. The batch dimensions
should match the length of `lines`.
"""
pass
def _get_filename_queue(self, epoch_limit):
"""Constructs a filename queue with an epoch limit.
`epoch_limit` is intended as an error checking fallback to prevent a reader
from infinitely looping in its requests for more work items if none are
available in any file. It should be set high enough that it is never reached
assuming at least one record exists in some file.
Args:
epoch_limit: The maximum number of times to read through the complete list
of files before throwing an OutOfRangeError.
Returns:
A tuple of (filename_queue, epoch_limiter):
filename_queue: A FIFOQueue with filename work items.
epoch_limiter: The local variable used for epoch limitation. This should
be set to zero before a reader is passed `filename_queue` in order to
reset the epoch limiter's state.
"""
epoch_limiter = variable_scope.variable(
initial_value=constant_op.constant(0, dtype=dtypes.int64),
name="epoch_limiter",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
filenames_tensor = array_ops.reshape(
ops.convert_to_tensor(self._filenames), [-1])
# We can't rely on epoch_limiter being initialized, since queue runners are
# started before local variables are initialized. Instead, we ignore epoch
# limits before variable initialization. This means that prior to variable
# initialization, a QueueRunner may cause a reader to enter an un-checked
# infinite loop. However, as soon as local variables are initialized, we
# will start incrementing and checking epoch_limiter, which will interrupt
# any in-progress loops.
conditional_count_up_to = control_flow_ops.cond(
state_ops.is_variable_initialized(epoch_limiter),
lambda: epoch_limiter.count_up_to(epoch_limit),
lambda: constant_op.constant(0, dtype=dtypes.int64))
with ops.control_dependencies([conditional_count_up_to]):
filenames_tensor = array_ops.identity(filenames_tensor)
filename_queue = input_lib.string_input_producer(
filenames_tensor, shuffle=False, capacity=1)
return filename_queue, epoch_limiter
def read(self):
"""Reads a chunk of data from the `tf.ReaderBase` for later re-chunking."""
# Assuming there is at least one item to be read among all of the files in
# self._filenames, we will not need to go through more than
# self._read_num_records_hint epochs to get a batch of
# self._read_num_records_hint records. Setting this limit and resetting it
# before each reader.read_up_to call prevents infinite looping when there
# are no records available in any of the files.
filename_queue, epoch_limiter = self._get_filename_queue(
epoch_limit=self._read_num_records_hint)
reader = self._get_reader()
epoch_reset_op = state_ops.assign(epoch_limiter, 0)
with ops.control_dependencies([epoch_reset_op]):
_, records = reader.read_up_to(
filename_queue, self._read_num_records_hint)
return self._process_records(records)
def read_full(self):
"""Reads a full epoch of data into memory."""
reader = self._get_reader()
# Set a hard limit of 2 epochs through self._filenames. If there are any
# records available, we should only end up reading the first record in the
# second epoch before exiting the while loop and subsequently resetting the
# epoch limit. If there are no records available in any of the files, this
# hard limit prevents the reader.read_up_to call from looping infinitely.
filename_queue, epoch_limiter = self._get_filename_queue(epoch_limit=2)
epoch_reset_op = state_ops.assign(epoch_limiter, 0)
with ops.control_dependencies([epoch_reset_op]):
first_key, first_value = reader.read_up_to(filename_queue, 1)
# Read until we get a duplicate key (one epoch)
def _while_condition(
current_key, current_value, current_index, collected_records):
del current_value, current_index, collected_records # unused
return math_ops.not_equal(array_ops.squeeze(current_key, axis=0),
array_ops.squeeze(first_key, axis=0))
def _while_body(
current_key, current_value, current_index, collected_records):
del current_key # unused
new_key, new_value = reader.read_up_to(filename_queue, 1)
new_key.set_shape([1])
new_value.set_shape([1])
return (new_key,
new_value,
current_index + 1,
collected_records.write(current_index, current_value))
_, _, _, records_ta = control_flow_ops.while_loop(
_while_condition,
_while_body,
[constant_op.constant([""]), first_value,
0, # current_index starting value
tensor_array_ops.TensorArray( # collected_records
dtype=dtypes.string, size=0, dynamic_size=True)])
records = records_ta.concat()
# Reset the reader when we're done so that subsequent requests for data get
# the dataset in the proper order.
with ops.control_dependencies([records]):
reader_reset_op = reader.reset()
with ops.control_dependencies([reader_reset_op]):
records = array_ops.identity(records)
return self._process_records(records)
class CSVReader(ReaderBaseTimeSeriesParser):
"""Reads from a collection of CSV-formatted files."""
def __init__(self,
filenames,
column_names=(feature_keys.TrainEvalFeatures.TIMES,
feature_keys.TrainEvalFeatures.VALUES),
column_dtypes=None,
skip_header_lines=None,
read_num_records_hint=4096):
"""CSV-parsing reader for a `TimeSeriesInputFn`.
Args:
filenames: A filename or list of filenames to read the time series
from. Each line must have columns corresponding to `column_names`.
column_names: A list indicating names for each
feature. `TrainEvalFeatures.TIMES` and `TrainEvalFeatures.VALUES` are
required; `VALUES` may be repeated to indicate a multivariate series.
column_dtypes: If provided, must be a list with the same length as
`column_names`, indicating dtypes for each column. Defaults to
`tf.int64` for `TrainEvalFeatures.TIMES` and `tf.float32` for
everything else.
skip_header_lines: Passed on to `tf.TextLineReader`; skips this number of
lines at the beginning of each file.
read_num_records_hint: When not reading a full dataset, indicates the
number of records to parse/transfer in a single chunk (for
efficiency). The actual number transferred at one time may be more or
less.
Raises:
ValueError: If required column names are not specified, or if lengths do
not match.
"""
if feature_keys.TrainEvalFeatures.TIMES not in column_names:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in column_names:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.VALUES))
if column_dtypes is not None and len(column_dtypes) != len(column_names):
raise ValueError(
("If specified, the length of column_dtypes must match the length of "
"column_names (got column_dtypes={} and column_names={}).").format(
column_dtypes, column_names))
if sum(1 for column_name in column_names
if column_name == feature_keys.TrainEvalFeatures.TIMES) != 1:
raise ValueError(
"Got more than one times column ('{}'), but exactly "
"one is required.".format(feature_keys.TrainEvalFeatures.TIMES))
self._column_names = column_names
self._column_dtypes = column_dtypes
self._skip_header_lines = skip_header_lines
super(CSVReader, self).__init__(
filenames=filenames, read_num_records_hint=read_num_records_hint)
def _get_reader(self):
return io_ops.TextLineReader(skip_header_lines=self._skip_header_lines)
def _process_records(self, lines):
"""Parse `lines` as CSV records."""
if self._column_dtypes is None:
default_values = [(array_ops.zeros([], dtypes.int64),)
if column_name == feature_keys.TrainEvalFeatures.TIMES
else () for column_name in self._column_names]
else:
default_values = [(array_ops.zeros([], dtype),)
for dtype in self._column_dtypes]
columns = parsing_ops.decode_csv(lines, default_values)
features_lists = {}
for column_name, value in zip(self._column_names, columns):
features_lists.setdefault(column_name, []).append(value)
features = {}
for column_name, values in features_lists.items():
if column_name == feature_keys.TrainEvalFeatures.TIMES:
features[column_name] = values[0]
else:
features[column_name] = array_ops.stack(values, axis=1)
return features
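# A minimal configuration sketch for the CSVReader above, assuming a
# hypothetical file "prices.csv" whose rows look like "time,value1,value2";
# the filename and column layout are illustrative, not part of this module.
def _example_csv_reader():
  return CSVReader(
      filenames=["prices.csv"],
      column_names=(feature_keys.TrainEvalFeatures.TIMES,
                    feature_keys.TrainEvalFeatures.VALUES,
                    feature_keys.TrainEvalFeatures.VALUES))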
class TFExampleReader(ReaderBaseTimeSeriesParser):
"""Reads and parses `tf.Example`s from a TFRecords file."""
def __init__(self,
filenames,
features):
"""Configure `tf.Example` parsing.
Args:
      filenames: A filename or list of filenames to read the time series
          from. Each file should be a TFRecords file of serialized
          `tf.Example` protos containing the features specified below.
features: A dictionary mapping from feature keys to `tf.FixedLenFeature`
objects. Must include `TrainEvalFeatures.TIMES` (scalar integer) and
`TrainEvalFeatures.VALUES` (floating point vector) features.
Raises:
ValueError: If required times/values features are not present.
"""
if feature_keys.TrainEvalFeatures.TIMES not in features:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in features:
raise ValueError("'{}' is a required column.".format(
feature_keys.TrainEvalFeatures.VALUES))
self._features = features
super(TFExampleReader, self).__init__(filenames=filenames)
def _get_reader(self):
return io_ops.TFRecordReader()
def _process_records(self, examples):
"""Parse `tf.Example`s into `Tensors`."""
return parsing_ops.parse_example(
serialized=examples, features=self._features)
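# A minimal configuration sketch for the TFExampleReader above, assuming a
# hypothetical TFRecords file "series.tfrecord" whose tf.Examples hold a
# scalar int64 time and a length-2 float32 values vector; the filename and
# feature shapes are illustrative assumptions.
def _example_tf_example_reader():
  return TFExampleReader(
      filenames="series.tfrecord",
      features={
          feature_keys.TrainEvalFeatures.TIMES:
              parsing_ops.FixedLenFeature([], dtype=dtypes.int64),
          feature_keys.TrainEvalFeatures.VALUES:
              parsing_ops.FixedLenFeature([2], dtype=dtypes.float32)})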
class TimeSeriesInputFn(object):
"""Base for classes which create batches of windows from a time series."""
@abc.abstractmethod
def create_batch(self):
"""Creates chunked Tensors from times, values, and other features.
Suitable for use as the input_fn argument of a tf.estimator.Estimator's
fit() or evaluate() method.
Returns:
A tuple of (features, targets):
features: A dictionary with `TrainEvalFeatures.TIMES` and
`TrainEvalFeatures.VALUES` as keys, `TIMES` having an associated value
with shape [batch size x window length], `VALUES` with shape [batch
size x window length x number of features]. Any other features will
also have shapes prefixed with [batch size x window length].
targets: Not used, but must have a value for compatibility with the
Estimator API. That value should be None.
"""
pass
def __call__(self):
# Allow a TimeSeriesInputFn to be used as an input function directly
return self.create_batch()
class WholeDatasetInputFn(TimeSeriesInputFn):
"""Supports passing a full time series to a model for evaluation/inference.
Note that this `TimeSeriesInputFn` is not designed for high throughput, and
should not be used for training. It allows for sequential evaluation on a full
dataset (with sequential in-sample predictions), which then feeds naturally
into `predict_continuation_input_fn` for making out-of-sample
predictions. While this is useful for plotting and interactive use,
`RandomWindowInputFn` is better suited to training and quantitative
evaluation.
"""
# TODO(allenl): A SequentialWindowInputFn for getting model end state without
# loading the whole dataset into memory (or for quantitative evaluation of
# sequential models). Note that an Estimator using such a TimeSeriesInputFn
# won't return in-sample predictions for the whole dataset, which means it
# won't be terribly useful for interactive use/plotting (unless the user
# passes in concat metrics). Also need to be careful about state saving for
# sequential models, particularly the gaps between chunks.
def __init__(self, time_series_reader):
"""Initialize the `TimeSeriesInputFn`.
Args:
time_series_reader: A TimeSeriesReader object.
"""
self._reader = time_series_reader
super(WholeDatasetInputFn, self).__init__()
def create_batch(self):
"""A suitable `input_fn` for an `Estimator`'s `evaluate()`.
Returns:
      A tuple of (features, None) for compatibility with the Estimator API,
      where features is a dictionary mapping feature names to `Tensors`, each
      shape prefixed by [1, data set size] (i.e. a batch size of 1).
"""
features = self._reader.read_full()
# Add a batch dimension of one to each feature.
return ({feature_name: feature_value[None, ...]
for feature_name, feature_value in features.items()},
None)
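# A short sketch of wiring a reader into WholeDatasetInputFn for full-series
# evaluation, reusing the hypothetical CSV file assumed earlier (here with the
# default "time,value" column layout).
def _example_whole_dataset_input_fn():
  reader = CSVReader(filenames=["prices.csv"])
  return WholeDatasetInputFn(reader)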
class RandomWindowInputFn(TimeSeriesInputFn):
"""Wraps a `TimeSeriesReader` to create random batches of windows.
Tensors are first collected into sequential windows (in a windowing queue
created by `tf.train.batch`, based on the order returned from
`time_series_reader`), then these windows are randomly batched (in a
`RandomShuffleQueue`), the Tensors returned by `create_batch` having shapes
prefixed by [`batch_size`, `window_size`].
This `TimeSeriesInputFn` is useful for both training and quantitative
evaluation (but be sure to run several epochs for sequential models such as
`StructuralEnsembleRegressor` to completely flush stale state left over from
training). For qualitative evaluation or when preparing for predictions, use
`WholeDatasetInputFn`.
"""
def __init__(
self, time_series_reader, window_size, batch_size,
queue_capacity_multiplier=1000, shuffle_min_after_dequeue_multiplier=2,
discard_out_of_order=True, discard_consecutive_batches_limit=1000,
jitter=True, num_threads=2, shuffle_seed=None):
"""Configure the RandomWindowInputFn.
Args:
time_series_reader: A TimeSeriesReader object.
window_size: The number of examples to keep together sequentially. This
controls the length of truncated backpropagation: smaller values mean
less sequential computation, which can lead to faster training, but
create a coarser approximation to the gradient (which would ideally be
computed by a forward pass over the entire sequence in order).
batch_size: The number of windows to place together in a batch. Larger
values will lead to more stable gradients during training.
queue_capacity_multiplier: The capacity for the queues used to create
batches, specified as a multiple of `batch_size` (for
RandomShuffleQueue) and `batch_size * window_size` (for the
FIFOQueue). Controls the maximum number of windows stored. Should be
greater than `shuffle_min_after_dequeue_multiplier`.
shuffle_min_after_dequeue_multiplier: The minimum number of windows in the
RandomShuffleQueue after a dequeue, which controls the amount of entropy
introduced during batching. Specified as a multiple of `batch_size`.
discard_out_of_order: If True, windows of data which have times which
decrease (a higher time followed by a lower time) are discarded. If
False, the window and associated features are instead sorted so that
times are non-decreasing. Discarding is typically faster, as models do
not have to deal with artificial gaps in the data. However, discarding
does create a bias where the beginnings and endings of files are
under-sampled.
discard_consecutive_batches_limit: Raise an OutOfRangeError if more than
this number of batches are discarded without a single non-discarded
window (prevents infinite looping when the dataset is too small).
jitter: If True, randomly discards examples between some windows in order
to avoid deterministic chunking patterns. This is important for models
like AR which may otherwise overfit a fixed chunking.
num_threads: Use this number of threads for queues. Setting a value of 1
removes one source of non-determinism (and in combination with
shuffle_seed should provide deterministic windowing).
shuffle_seed: A seed for window shuffling. The default value of None
provides random behavior. With `shuffle_seed` set and
`num_threads=1`, provides deterministic behavior.
"""
self._reader = time_series_reader
self._window_size = window_size
self._reader.check_dataset_size(minimum_dataset_size=self._window_size)
self._batch_size = batch_size
self._queue_capacity_multiplier = queue_capacity_multiplier
self._shuffle_min_after_dequeue_multiplier = (
shuffle_min_after_dequeue_multiplier)
self._discard_out_of_order = discard_out_of_order
self._discard_limit = discard_consecutive_batches_limit
self._jitter = jitter
if num_threads is None:
self._num_threads = self._batch_size
else:
self._num_threads = num_threads
self._shuffle_seed = shuffle_seed
super(RandomWindowInputFn, self).__init__()
def create_batch(self):
"""Create queues to window and batch time series data.
Returns:
      A tuple of (features, None) for compatibility with the Estimator API,
      where features is a dictionary of Tensors corresponding to the output of
      `self._reader` (from the `time_series_reader` constructor argument), each
      with shapes prefixed by [`batch_size`, `window_size`].
"""
features = self._reader.read()
if self._jitter:
# TODO(agarwal, allenl): Figure out if more jitter is needed here.
jitter = random_ops.random_uniform(shape=[], maxval=2, dtype=dtypes.int32)
else:
jitter = 0
# To keep things efficient, we pass from the windowing batcher to the
# batch-of-windows batcher in batches. This avoids the need for huge numbers
# of threads, but does mean that jitter is only applied occasionally.
# TODO(allenl): Experiment with different internal passing sizes.
internal_passing_size = self._batch_size
features_windowed = input_lib.batch(
features,
batch_size=self._window_size * internal_passing_size + jitter,
enqueue_many=True,
capacity=(self._queue_capacity_multiplier
* internal_passing_size * self._window_size),
num_threads=self._num_threads)
raw_features_windowed = features_windowed
if self._jitter:
features_windowed = {
key: value[jitter:]
for key, value in features_windowed.items()}
features_windowed = {
key: array_ops.reshape(
value,
array_ops.concat(
[[internal_passing_size, self._window_size],
array_ops.shape(value)[1:]],
axis=0))
for key, value in features_windowed.items()}
batch_and_window_shape = tensor_shape.TensorShape(
[internal_passing_size, self._window_size])
for key in features_windowed.keys():
features_windowed[key].set_shape(
batch_and_window_shape.concatenate(
raw_features_windowed[key].get_shape()[1:]))
# When switching files, we may end up with windows where the time is not
# decreasing, even if times within each file are sorted (and even if those
# files are visited in order, when looping back around to the beginning of
# the first file). This is hard for models to deal with, so we either
# discard such examples, creating a bias where the beginning and end of the
# series is under-sampled, or we sort the window, creating large gaps.
times = features_windowed[feature_keys.TrainEvalFeatures.TIMES]
if self._discard_out_of_order:
non_decreasing = math_ops.reduce_all(
times[:, 1:] >= times[:, :-1], axis=1)
# Ensure that no more than self._discard_limit complete batches are
# discarded contiguously (resetting the count when we find a single clean
# window). This prevents infinite looping when the dataset is smaller than
# the window size.
# TODO(allenl): Figure out a way to return informative errors from
# count_up_to.
discarded_windows_limiter = variable_scope.variable(
initial_value=constant_op.constant(0, dtype=dtypes.int64),
name="discarded_windows_limiter",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
def _initialized_limit_check():
return control_flow_ops.cond(
math_ops.reduce_any(non_decreasing),
lambda: state_ops.assign(discarded_windows_limiter, 0),
lambda: discarded_windows_limiter.count_up_to(self._discard_limit))
discard_limit_op = control_flow_ops.cond(
state_ops.is_variable_initialized(discarded_windows_limiter),
_initialized_limit_check,
lambda: constant_op.constant(0, dtype=dtypes.int64))
with ops.control_dependencies([discard_limit_op]):
non_decreasing = array_ops.identity(non_decreasing)
else:
_, indices_descending = nn.top_k(
times, k=array_ops.shape(times)[-1], sorted=True)
indices = array_ops.reverse(indices_descending, axis=[0])
features_windowed = {
key: array_ops.gather(params=value, indices=indices)
for key, value in features_windowed.items()
}
non_decreasing = True
features_batched = input_lib.maybe_shuffle_batch(
features_windowed,
num_threads=self._num_threads,
seed=self._shuffle_seed,
batch_size=self._batch_size,
capacity=self._queue_capacity_multiplier * self._batch_size,
min_after_dequeue=(self._shuffle_min_after_dequeue_multiplier *
self._batch_size),
keep_input=non_decreasing,
enqueue_many=True)
return (features_batched, None)
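# A sketch of a training-style configuration: random batches of 16 windows,
# each 32 steps long, drawn from the same hypothetical CSV reader; the window
# and batch sizes are arbitrary illustrative choices, not recommendations.
def _example_random_window_input_fn():
  reader = CSVReader(filenames=["prices.csv"])
  return RandomWindowInputFn(reader, window_size=32, batch_size=16)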
def _canonicalize_numpy_data(data, require_single_batch):
"""Do basic checking and reshaping for Numpy data.
Args:
data: A dictionary mapping keys to Numpy arrays, with several possible
shapes (requires keys `TrainEvalFeatures.TIMES` and
`TrainEvalFeatures.VALUES`):
Single example; `TIMES` is a scalar and `VALUES` is either a scalar or a
vector of length [number of features].
Sequence; `TIMES` is a vector of shape [series length], `VALUES` either
has shape [series length] (univariate) or [series length x number of
features] (multivariate).
      Batch of sequences; `TIMES` is an array of shape [batch size x series
length], `VALUES` has shape [batch size x series length] or [batch
size x series length x number of features].
In any case, `VALUES` and any exogenous features must have their shapes
prefixed by the shape of the value corresponding to the `TIMES` key.
require_single_batch: If True, raises an error if the provided data has a
batch dimension > 1.
Returns:
A dictionary with features normalized to have shapes prefixed with [batch
size x series length]. The sizes of dimensions which were omitted in the
inputs are 1.
Raises:
ValueError: If dimensions are incorrect or do not match, or required
features are missing.
"""
features = {key: numpy.array(value) for key, value in data.items()}
if (feature_keys.TrainEvalFeatures.TIMES not in features or
feature_keys.TrainEvalFeatures.VALUES not in features):
raise ValueError("{} and {} are required features.".format(
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.TrainEvalFeatures.VALUES))
times = features[feature_keys.TrainEvalFeatures.TIMES]
for key, value in features.items():
if value.shape[:len(times.shape)] != times.shape:
raise ValueError(
("All features must have their shapes prefixed by the shape of the"
" times feature. Got shape {} for feature '{}', but shape {} for"
" '{}'").format(value.shape, key, times.shape,
feature_keys.TrainEvalFeatures.TIMES))
if not times.shape: # a single example
if not features[feature_keys.TrainEvalFeatures.VALUES].shape: # univariate
# Add a feature dimension (with one feature)
features[feature_keys.TrainEvalFeatures.VALUES] = features[
feature_keys.TrainEvalFeatures.VALUES][..., None]
elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 1:
raise ValueError(
("Got an unexpected number of dimensions for the '{}' feature."
" Was expecting at most 1 dimension"
" ([number of features]) since '{}' does not "
"have a batch or time dimension, but got shape {}").format(
feature_keys.TrainEvalFeatures.VALUES,
feature_keys.TrainEvalFeatures.TIMES,
features[feature_keys.TrainEvalFeatures.VALUES].shape))
# Add trivial batch and time dimensions for every feature
features = {key: value[None, None, ...] for key, value in features.items()}
if len(times.shape) == 1: # shape [series length]
if len(features[feature_keys.TrainEvalFeatures.VALUES]
.shape) == 1: # shape [series length]
# Add a feature dimension (with one feature)
features[feature_keys.TrainEvalFeatures.VALUES] = features[
feature_keys.TrainEvalFeatures.VALUES][..., None]
elif len(features[feature_keys.TrainEvalFeatures.VALUES].shape) > 2:
raise ValueError(
("Got an unexpected number of dimensions for the '{}' feature."
" Was expecting at most 2 dimensions"
" ([series length, number of features]) since '{}' does not "
"have a batch dimension, but got shape {}").format(
feature_keys.TrainEvalFeatures.VALUES,
feature_keys.TrainEvalFeatures.TIMES,
features[feature_keys.TrainEvalFeatures.VALUES].shape))
# Add trivial batch dimensions for every feature
features = {key: value[None, ...] for key, value in features.items()}
elif len(features[feature_keys.TrainEvalFeatures.TIMES]
.shape) != 2: # shape [batch size, series length]
raise ValueError(
("Got an unexpected number of dimensions for times. Was expecting at "
"most two ([batch size, series length]), but got shape {}.").format(
times.shape))
if require_single_batch:
# We don't expect input to be already batched; batching is done later
if features[feature_keys.TrainEvalFeatures.TIMES].shape[0] != 1:
raise ValueError("Got batch input, was expecting unbatched input.")
return features
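# A small worked sketch of the canonicalization above: a univariate series of
# length 4 with no batch dimension comes back with shape [1, 4] for times and
# [1, 4, 1] for values; the literal data here is purely illustrative.
def _example_canonicalize_numpy_data():
  return _canonicalize_numpy_data(
      {feature_keys.TrainEvalFeatures.TIMES: numpy.arange(4),
       feature_keys.TrainEvalFeatures.VALUES: numpy.zeros(4)},
      require_single_batch=True)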
| apache-2.0 |
BiaDarkia/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 25 | 6216 | from __future__ import unicode_literals
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils.testing import (assert_raises, assert_true, assert_equal,
ignore_warnings)
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
@ignore_warnings(category=DeprecationWarning)
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
@ignore_warnings(category=DeprecationWarning)
def test_hasher_alternate_sign():
X = [list("Thequickbrownfoxjumped")]
Xt = FeatureHasher(alternate_sign=True, non_negative=False,
input_type='string').fit_transform(X)
assert Xt.data.min() < 0 and Xt.data.max() > 0
Xt = FeatureHasher(alternate_sign=True, non_negative=True,
input_type='string').fit_transform(X)
assert Xt.data.min() > 0
Xt = FeatureHasher(alternate_sign=False, non_negative=True,
input_type='string').fit_transform(X)
assert Xt.data.min() > 0
Xt_2 = FeatureHasher(alternate_sign=False, non_negative=False,
input_type='string').fit_transform(X)
# With initially positive features, the non_negative option should
# have no impact when alternate_sign=False
assert_array_equal(Xt.data, Xt_2.data)
@ignore_warnings(category=DeprecationWarning)
def test_hash_collisions():
X = [list("Thequickbrownfoxjumped")]
Xt = FeatureHasher(alternate_sign=True, non_negative=False,
n_features=1, input_type='string').fit_transform(X)
# check that some of the hashed tokens are added
# with an opposite sign and cancel out
assert abs(Xt.data[0]) < len(X[0])
Xt = FeatureHasher(alternate_sign=True, non_negative=True,
n_features=1, input_type='string').fit_transform(X)
assert abs(Xt.data[0]) < len(X[0])
Xt = FeatureHasher(alternate_sign=False, non_negative=True,
n_features=1, input_type='string').fit_transform(X)
assert Xt.data[0] == len(X[0])
@ignore_warnings(category=DeprecationWarning)
def test_hasher_negative():
X = [{"foo": 2, "bar": -4, "baz": -1}.items()]
Xt = FeatureHasher(alternate_sign=False, non_negative=False,
input_type="pair").fit_transform(X)
assert_true(Xt.data.min() < 0 and Xt.data.max() > 0)
Xt = FeatureHasher(alternate_sign=False, non_negative=True,
input_type="pair").fit_transform(X)
assert_true(Xt.data.min() > 0)
Xt = FeatureHasher(alternate_sign=True, non_negative=False,
input_type="pair").fit_transform(X)
assert_true(Xt.data.min() < 0 and Xt.data.max() > 0)
Xt = FeatureHasher(alternate_sign=True, non_negative=True,
input_type="pair").fit_transform(X)
assert_true(Xt.data.min() > 0)
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/covariance/robust_covariance.py | 103 | 29653 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets of the data
    before pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal)
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
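# A minimal sketch of calling fast_mcd on toy Gaussian data with a handful of
# planted outliers; the sample size, dimension and contamination level are
# illustrative choices only.
def _example_fast_mcd():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    X[:5] += 10.  # shift a few points far from the bulk
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    return location, covariance, support, dist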
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is approximately zero but
        not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
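# A short usage sketch for MinCovDet on similarly contaminated toy data; after
# fit, location_, covariance_ and support_ hold the reweighted robust
# estimates (the data sizes here are illustrative only).
def _example_min_cov_det():
    rng = np.random.RandomState(42)
    X = rng.randn(200, 2)
    X[:10] += 5.  # planted outliers
    mcd = MinCovDet(random_state=rng).fit(X)
    return mcd.location_, mcd.covariance_, mcd.support_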
| bsd-3-clause |
ChenglongChen/Kaggle_HomeDepot | Code/Chenglong/google_spelling_checker_dict.py | 1 | 170884 | # -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: google spelling checker for search_term
@ref: https://www.kaggle.com/steubk/home-depot-product-search-relevance/fixing-typos
"""
spelling_checker_dict = {
'steele stake': 'steel stake',
'gas mowe': 'gas mower',
'metal plate cover gcfi': 'metal plate cover gfci',
'lawn sprkinler': 'lawn sprinkler',
'ourdoor patio tile': 'outdoor patio tile',
'6 teir shelving': '6 tier shelving',
'storage shelve': 'storage shelf',
'American Standard Bone round toliet': 'American Standard Bone round toilet',
'6 stell': '6 steel',
'fece posts metal': 'fence posts metal',
'cushions outdoorlounge': 'cushions outdoor lounge',
'pricepfister kitchen faucet g135': 'price pfister kitchen faucet g135',
'glaciar bay toiled': 'glacier bay toilet',
'glacie bay dual flush': 'glacier bay dual flush',
'glacier bay tiolet tank lid': 'glacier bay toilet tank lid',
'handycap toilets': 'handicap toilets',
'high boy tolet': 'highboy toilet',
'gas wayer heaters': 'gas water heaters',
'basemetnt window': 'basement window',
'rustollum epoxy': 'rustoleum epoxy',
'air /heaterconditioner window': 'air /heat conditioner window',
'spliter ac unit': 'splitter ac unit',
'berh deck over': 'behr deck over',
'28 snow thower': '28 snow thrower',
'base board molding boundle': 'baseboard molding bundle',
'1 infloor flange': '1 in floor flange',
'10 window sping rod': '10 window spring rod',
'combo powertool kit': 'combo power tool kit',
'desalt impact 18': 'dewalt impact 18',
'rigid lithium ion batteries fuego drill': 'ridgid lithium ion batteries fuego drill',
'fiberglass repir kit': 'fiberglass repair kit',
'portable air condtioners': 'portable air conditioners',
'wall pannels': 'wall panels',
'2X4 SRUDS': '2X4 STUDS',
'frostking window shrink film': 'frost king window shrink film',
'Florescent Light Bulbs': 'Fluorescent Light Bulbs',
'violet flourescent light': 'violet fluorescent light',
'lawn mower- electic': 'lawn mower- electric',
'closetmade': 'closetmaid',
'greecianmarble floor tile': 'grecian marble floor tile',
'join compound wall tile': 'joint compound wall tile',
'montagnia contina floor tile': 'montagna cortina floor tile',
'porcelin floor tile 6x24': 'porcelain floor tile 6x24',
'three wayy': 'three way',
'incide wall heater': 'inside wall heater',
'westminster pedistal combo': 'westminster pedestal combo',
'water softners': 'water softeners',
'miricale': 'miracle',
'sliding windos locks': 'sliding window locks',
'20v dewalt kombo': '20v dewalt combo',
'DEWALT VACCUM': 'DEWALT VACUUM',
'lithium 20 dewalt': 'lithium 20v dewalt',
'water heather': 'water heater',
'riobi blower vac 9056': 'ryobi blower vac 9056',
'DRAWEER PULLS': 'DRAWER PULLS',
'bagged cinder mulch': 'bagged cedar mulch',
'hindges': 'hinges',
'chair rail hieght': 'chair rail height',
'celling light': 'ceiling light',
'tub repair kit procelian': 'tub repair kit porcelain',
'dewalr tools': 'dewalt tools',
'zinc plated flatbraces': 'zinc plated flat braces',
'cieling': 'ceiling',
'control celing fan': 'control ceiling fan',
'roll roofing lap cemet': 'roll roofing lap cement',
'cedart board': 'cedar board',
'lg stcking kit': 'lg stacking kit',
'ajustable ladder feet': 'adjustable ladder feet',
'milwakee M12': 'milwaukee M12',
'garden sprayer non pump': 'garden sprayer no pump',
'roof rdge flashing': 'roof edge flashing',
'cable prime line emergensy open': 'cable prime line emergency open',
'roybi l18v': 'ryobi l18v',
'milwaukee 18-volt lithium-ion cordlessrotary hammerss': 'milwaukee 18-volt lithium-ion cordless rotary hammers',
'bath sinnk': 'bath sink',
'bathro sinks': 'bathroom sinks',
'bathroom pedelal sink': 'bathroom pedestal sink',
'epoxy concrete pain': 'epoxy concrete paint',
'pool suppll': 'pool supply',
'3-3 galvinized tubing': '3-3 galvanized tubing',
'portable air conditionar and heater': 'portable air conditioner and heater',
'vynal windows': 'vinyl windows',
'aluminun tread plate': 'aluminum tread plate',
'3/4 vlve': '3/4 valve',
'kitchen ceiling lightening': 'kitchen ceiling lighting',
'led fixtues for the kitchen': 'led fixtures for the kitchen',
'wall design cermic': 'wall design ceramic',
'door chim buttons': 'door chime buttons',
'plastice corrugated panels': 'plastic corrugated panels',
'doors gaurds': 'doors guards',
'24 inche sink and vanity for bath': '24 inch sink and vanity for bath',
'24 swantone vanity top': '24 swanstone vanity top',
'40 wattsolar charged lights': '40 watt solar charged lights',
'buikids toilet seat': 'buy kids toilet seat',
'toliet seats': 'toilet seats',
'land scaping timbers': 'landscaping timbers',
'everblit heavy duty canvas dropcloth': 'everbilt heavy duty canvas drop cloth',
'3/4 sharkbits': '3/4 sharkbite',
'bath rom toilets': 'bathroom toilets',
'alumanam sheets': 'aluminum sheets',
'huskvarna': 'husqvarna',
'treate 2x4': 'treated 2x4',
'12000 btuair conditioners window': '12000 btu air conditioners window',
'air conditioner vbration': 'air conditioner vibration',
'heith-zenith motion lights': 'heath-zenith motion lights',
'small paint rollerss': 'small paint rollers',
'fencde posts': 'fence posts',
'knoty pine fencing': 'knotty pine fencing',
'metal sheet underpenning': 'metal sheet underpinning',
'plastic untility shelves': 'plastic utility shelves',
'christmass lights': 'christmas lights',
'garlend lights': 'garland lights',
'ceilig fan mount': 'ceiling fan mount',
'paito table and chairs': 'patio table and chairs',
'glacier bay one pice flapper': 'glacier bay one piece flapper',
'dcanvas drop cloth': 'canvas drop cloth',
'lawn mowre covers': 'lawn mower covers',
'vaccum for dw745': 'vacuum for dw745',
'Club cadet primer bulb': 'Cub cadet primer bulb',
'interior door lcoks': 'interior door locks',
'dremel toll kit': 'dremel tool kit',
'round up nozzle replacment': 'roundup nozzle replacement',
'ceder mulch': 'cedar mulch',
'sikalatexr concrete vonding adhesive': 'sikalatex concrete bonding adhesive',
'rigid air compressor': 'ridgid air compressor',
'garge doors': 'garage doors',
'ridding mowers': 'riding mowers',
'ridiing lawnmower': 'riding lawn mower',
'sliding mirror bathroom medicn cabinets': 'sliding mirror bathroom medicine cabinets',
'pastic qtr round': 'plastic quarter round',
'robutussin dh 835 replacement wick': 'robitussin dh 835 replacement wick',
'brick wall panles': 'brick wall panels',
'kitchen floor tikles': 'kitchen floor tiles',
'buffer polishewr': 'buffer polisher',
'keorsene heater wicks': 'kerosene heater wicks',
'1x6 cedar boaed': '1x6 cedar board',
'infered heaters': 'infrared heaters',
'1-1/2in. x 1ft. blk pipe': '1-1/2in. x 1 ft. black pipe',
'show me all 60 inch vaniteis': 'show me all 60 inch vanities',
'cieling fan': 'ceiling fan',
'instant waater heater gas lp': 'instant water heater gas lp',
'woodebn fence panels': 'wooden fence panels',
'hardiboard siding': 'hardie board siding',
'craft an lawn mower': 'craftsman lawn mower',
'kohler wellworth tpoilet': 'kohler wellworth toilet',
'moen dhower faucet': 'moen shower faucet',
'dewalt hand toolsg saw cordless': 'dewalt hand tools saw cordless',
'hindged l bracket': 'hinged l bracket',
'ceiling fan canopie for flst ceiling': 'ceiling fan canopy for flat ceiling',
'furnance vent delfector': 'furnace vent deflector',
'flourescent shop light': 'fluorescent shop light',
'bateries': 'batteries',
'bath wall tile chanpayne': 'bath wall tile champagne',
'floor ceramick': 'floor ceramic',
'stone are mb11': 'stone care mb11',
'traffic master porcelin ceramic tile portland stone': 'trafficmaster porcelain ceramic tile portland stone',
'celing fans hampton bay': 'ceiling fans hampton bay',
'outdoor ceilikng fan with light': 'outdoor ceiling fan with light',
'36in vinale fence': '36in vinyl fence',
'extention ladder little gaint': 'extension ladder little giant',
'closet rod 8 n9ickel': 'closet rod 8 nickel',
'closetmaid wire eight itier organizer': 'closetmaid wire eight tier organizer',
'shorten pendent lighting': 'shorten pendant lighting',
'chainlink gate': 'chain link gate',
'4 flourescent': '4 fluorescent',
'lithium batties': 'lithium batteries',
'24x73 book shelve case white': '24x73 bookshelf case white',
'linoliuml adhesive': 'linoleum adhesive',
'vynal flooring': 'vinyl flooring',
'vynal grip strip': 'vinyl grip strip',
'hagchet': 'hatchet',
'frameless mirro mount': 'frameless mirror mount',
'microwarve cart': 'microwave cart',
'mosia grout sealer': 'mosaic grout sealer',
'backsplach': 'backsplash',
'dimable ceiling strip lights': 'dimmable ceiling strip lights',
'lithum leaf blower': 'lithium leaf blower',
'rayoby batteries': 'ryobi batteries',
'pressure washerparts': 'pressure washer parts',
'rigid 18v lituim ion nicad': 'ridgid 18v lithium ion nicad',
'artric air portable': 'arctic air portable',
'8ft wht veranda post sleeve': '8 ft white veranda post sleeve',
'vynal fence': 'vinyl fence',
'solar naturlas salt': 'solar naturals salt',
'metl flashing': 'metal flashing',
'dog fence batt': 'dog fence battery',
'onda pressure washer': 'honda pressure washer',
'pressue washer': 'pressure washer',
'fridgdare air conditioners': 'frigidaire air conditioners',
'double pain windows': 'double pane windows',
'round flat topmetal post caps': 'round flat top metal post caps',
'1/2\' plyweood': '1/2\' plywood',
'ddummy door knobs interior': 'dummy door knobs interior',
'robi battery lawn trimmer': 'ryobi battery lawn trimmer',
'weewacker edger': 'weed wacker edger',
'prunning shears': 'pruning shears',
'steel enrty doors': 'steel entry doors',
'forimca': 'formica',
'satin nickle door hinge 4 in': 'satin nickel door hinge 4 in',
'garden hose repir cuplings': 'garden hose repair couplings',
'1/3 hoursepower garbage disposal': '1/3 horsepower garbage disposal',
'chicken wire 16 gauze': 'chicken wire 16 gauge',
'wheelbarow': 'wheelbarrow',
'didger': 'dodger',
'hhigh efficiency round toilet in white': 'high efficiency round toilet in white',
'accordian door venetian': 'accordion door venetian',
'patio flurniture covers': 'patio furniture covers',
'through thewall air conditioner': 'through the wall air conditioner',
'Whirpool washer': 'Whirlpool washer',
'4x6treaded wood': '4x6 treated wood',
'preature treated lumber 2in. x12in.x12 ft.': 'pressure treated lumber 2in. x 12 in.x 12 ft.',
'closetmade wood': 'closetmaid wood',
'steam cleanerm mop': 'steam cleaner mop',
'steqamers': 'steamers',
'pendant shads': 'pendant shades',
'battery operated flashingm light': 'battery operated flashing light',
'metal flexable water hose': 'metal flexible water hose',
'air filter for lawn equitment': 'air filter for lawn equipment',
'fiber glass pip insulation': 'fiberglass pipe insulation',
'insallation': 'installation',
'insullation': 'insulation',
'contracor string light': 'contractor string light',
'gas furnace and hotwater': 'gas furnace and hot water',
'rust oleum cabinet stain kit': 'rustoleum cabinet stain kit',
'sjhelf': 'shelf',
'small brackets for selves': 'small brackets for shelves',
'hecurles': 'hercules',
'anderson window grate': 'andersen window grate',
'anderson windows': 'andersen windows',
'lasron slider windows': 'larson slider windows',
'samsung 25.6 french door refridgerator': 'samsung 25.6 french door refrigerator',
'closet doors oganizers': 'closet doors organizers',
'koehler cimarron bathroom sink': 'kohler cimarron bathroom sink',
'kohler pedestal sink cimeron': 'kohler pedestal sink cimarron',
'cover for pole structue': 'cover for pole structure',
'drils': 'drills',
'surface mount channe': 'surface mount channel',
'outside corner- dentil': 'outside corner- dental',
'14heightx24withx15depth air conditioner': '14 heightx24 with 15 depth air conditioner',
'r30 demin insulation': 'r30 denim insulation',
'6 metal tee posts': '6 metal t posts',
'metal fence postsd': 'metal fence posts',
'aluminum l cahnnel': 'aluminum l channel',
'conner trim moulding': 'corner trim moulding',
'cornor board': 'corner board',
'pvc planel glue': 'pvc panel glue',
'3 in 1 vacum, ryobi': '3 in 1 vacuum, ryobi',
'toliet bowl rebuilding kits': 'toilet bowl rebuilding kits',
'swing set accesories': 'swing set accessories',
'ventenatural gas heater': 'vented natural gas heater',
'square ube wood': 'square cube wood',
'swivrl wood anchors': 'swivel wood anchors',
'ge gridle': 'ge griddle',
'pendant shafe': 'pendant shade',
'3/8 pipe galvinized': '3/8 pipe galvanized',
'vaporbarrier, crawl space': 'vapor barrier, crawl space',
'self sealant membrane': 'self sealing membrane',
'husky work bemch': 'husky work bench',
'vanity light fictures': 'vanity light fixtures',
'bed frames headboaed': 'bed frames headboard',
'replace plasticbathroom towel holder': 'replace plastic bathroom towel holder',
'whirlpool diswasher weather stripping': 'whirlpool dishwasher weather stripping',
'36 inch front dooe with casing': '36 inch front door with casing',
'glass back doorr': 'glass back door',
'pre hu door': 'pre hung door',
'backsplash paneks': 'backsplash panels',
'jeffery court mozaic tile': 'jeffrey court mosaic tile',
'floo shets': 'floor sheets',
'gazhose for dryer machine': 'gas hose for dryer machine',
'electric fireplacewater heaters': 'electric fireplace water heaters',
'ceiling mounted lighting fixures': 'ceiling mounted lighting fixtures',
'tools bloowers': 'tools blowers',
'artifical ground cover': 'artificial ground cover',
'waxhers and electric dryers': 'washers and electric dryers',
'outdoor tilees': 'outdoor tiles',
'owens corning ashingles': 'owens corning shingles',
'peper towel holder wall mount': 'paper towel holder wall mount',
'genecrac generators': 'generac generators',
'robyi gas weeder': 'ryobi gas weeder',
'acrtlic tape': 'acrylic tape',
'foam insulaion panels': 'foam insulation panels',
'rumbl;estone': 'rumblestone',
'famed sliding door $289.00': 'framed sliding door $289.00',
'padio door': 'patio door',
'cement boards ciding': 'cement boards siding',
'upholstry': 'upholstery',
'miror interior doors': 'mirror interior doors',
'recessed medicien cabinet': 'recessed medicine cabinet',
'bulked washed sand and gravel': 'bulk washed sand and gravel',
'sheet stock floorinh': 'sheet stock flooring',
'polycarbonite': 'polycarbonate',
'dedwalt cordless drill': 'dewalt cordless drill',
'ryobi power chalking gun': 'ryobi power caulking gun',
'poulan pro lawn motor blades': 'poulan pro lawn mower blades',
'diining set outdoor': 'dining set outdoor',
'granite countertop glu': 'granite countertop glue',
'cyculer saw': 'circular saw',
'kitchenaid frenchdoor ref': 'kitchenaid french door ref',
'rigid wet dry vac': 'ridgid wet dry vac',
'whirlpool caprios 4.3': 'whirlpool cabrio 4.3',
'micro wave ovens': 'microwave ovens',
'8 valleta edger': '8 valletta edger',
'decking hardsware': 'decking hardware',
'utility traiter': 'utility trailer',
'ceilin storage': 'ceiling storage',
'white wall bathroon cabinets': 'white wall bathroom cabinets',
'tsnkless hot water heater': 'tankless hot water heater',
'weed killer consertrated': 'weed killer concentrate',
'milwaukee ha,,er drill': 'milwaukee hammer drill',
'23 ince': '23 inch',
'stone outside tile': 'stone outdoor tile',
'galvanized outdoor celing fan': 'galvanized outdoor ceiling fan',
'oil rubbered bronze dor': 'oil rubbed bronze door',
'vynik tiles peel stick': 'vinyl tiles peel stick',
'window aircondiioner 12000 but': 'window air conditioner 12000 btu',
'60 lb hi strength concrete': '60 lb high strength concrete',
'plexy glass 24 x 24': 'plexiglass 24 x 24',
'porch liht fixture': 'porch light fixture',
'moving trollie': 'moving trolley',
'shoipping cart': 'shopping cart',
'accesory bags': 'accessory bags',
'garage door 70 lb extention spring': 'garage door 70 lb extension spring',
'riobi shop vac filter': 'ryobi shop vac filter',
'wet carpet cleaninig': 'wet carpet cleaning',
'pvd electrical conduit': 'pvc electrical conduit',
'roller up window blinds': 'roll up window blinds',
'uplihght': 'uplight',
'metal shelfs': 'metal shelves',
'dewalt 20v recepicating saw': 'dewalt 20v reciprocating saw',
'outdooor carpet': 'outdoor carpet',
'step latter': 'step ladder',
'kitchen cabinte hardware blue knob': 'kitchen cabinet hardware blue knob',
'pivotangle lock hinge': 'pivot angle lock hinge',
'plasticl panels': 'plastic panels',
'varigated fiber board': 'variegated fiber board',
'battery chages': 'battery charges',
'1/2 inch blk iron coupling': '1/2 inch black iron coupling',
'defiant led armer max': 'defiant led armormax',
'defiant led ight': 'defiant led light',
'led flashlightts': 'led flashlights',
'pfister pasedena 4 center set faucet': 'pfister pasadena 4 center set faucet',
'meguire plastic cleaner': 'meguiars plastic cleaner',
'single board pannel': 'single board panel',
'foundation fent covers': 'foundation vent covers',
'bottom freezer refrdgerators': 'bottom freezer refrigerators',
'colbolt drill bits': 'cobalt drill bits',
'soundfroofing material': 'soundproofing material',
'hanging light masn gar': 'hanging light mason jar',
'drywall mudd': 'drywall mud',
'delta bathroom falcet': 'delta bathroom faucet',
'ridgid 10000 watt': 'rigid 10000 watt',
'pvc edgetape white': 'pvc edge tape white',
'fireplace mantle': 'fireplace mantel',
'drop in sink ovel': 'drop in sink oval',
'40ft aluminumm ladder': '40 ft aluminum ladder',
'rigid shop vac filter': 'ridgid shop vac filter',
'moen single handle valvue rebuild': 'moen single handle valve rebuild',
'hunter ceiling fans accesories strip': 'hunter ceiling fans accessories strip',
'wheel barrel': 'wheelbarrow',
'16 aluminuim ladder': '16 aluminum ladder',
'1/2\' olastic pipe': '1/2\' plastic pipe',
'moen 7570 single hanlel faucet': 'moen 7570 single handle faucet',
'padtio heater': 'patio heater',
'rachet scret drivers': 'ratchet screwdrivers',
'water fountain nozle': 'water fountain nozzle',
'rigid sander': 'ridgid sander',
'anderson 4000 windows': 'andersen 4000 windows',
'doublew stainless': 'double stainless',
'milwakee m12 cordless heated jacket': 'milwaukee m12 cordless heated jacket',
'french door scree doorsscreen door': 'french door screen doors screen door',
'samsung refridegrator': 'samsung refrigerator',
'flurorescent light bulbs': 'fluorescent light bulbs',
'phillips 40t12cw plus florescent tube': 'phillips 40t12cw plus fluorescent tube',
'black and decker timmer parts st4500': 'black and decker trimmer parts st4500',
'gas range slide inove': 'gas range slide in love',
'baldwin lock stets': 'baldwin lock sets',
'6 ft ceder fence': '6 ft cedar fence',
'storeage': 'storage',
'beckett fountin pump': 'beckett fountain pump',
'polyeurethane exterior': 'polyurethane exterior',
'ceiling pannel': 'ceiling panel',
'70 celing fan': '70 ceiling fan',
'vynil barackets': 'vinyl brackets',
'moen kitchen fauchet': 'moen kitchen faucet',
'ridgid model wd1680 filter': 'rigid model wd1680 filter',
'point of use electtric': 'point of use electric',
'stell finished french patio door': 'steel finished french patio door',
'lg elec laundry suite': 'lg electric laundry suite',
'outdoor screem': 'outdoor screen',
'patio chair cushions/marth stewart': 'patio chair cushions/martha stewart',
'24 hollow core closet dor': '24 hollow core closet door',
'rigid miter saw': 'ridgid miter saw',
'ruotor table': 'router table',
'airconditioner decoritive cover unit': 'air conditioner decorative cover unit',
'miwaukee 18v battery and charger': 'milwaukee 18v battery and charger',
'potable air conditioner': 'portable air conditioner',
'perhung 30x80 interior door': 'prehung 30 x 80 interior door',
'6 dewalt skill saw': '6 dewalt skil saw',
'1x8x8 toung and grove': '1x8x8 tongue and groove',
'river feather door threashold': 'river feather door threshold',
'range connnector': 'range connector',
'ligt fixture covers': 'light fixture covers',
'window flasheing': 'window flashing',
'backet metal': 'bracket metal',
'horizantel fence panel': 'horizontal fence panel',
'rug pad 8 x 10': 'rug pad 8x10',
'frigadaire appliances': 'frigidaire appliances',
'bath si k cabinets': 'bath sink cabinets',
'8x10 outside storage': '8x10 outdoor storage',
'earthgrow mulch': 'earthgro mulch',
'10 60 tooth blde': '10 60 tooth blade',
'sink faucet with soap dispencer': 'sink faucet with soap dispenser',
'ridgid job max attatchmens': 'ridgid jobmax attachments',
'ridgid wrachet head': 'ridgid ratchet head',
'celliling light': 'ceiling light',
'waterroo concrete paint': 'waterproof concrete paint',
'americian standard champion 4 toliets': 'american standard champion 4 toilets',
'4 ftawning frame': '4 ft awning frame',
'restour for concrete': 'restore for concrete',
'econo florecent bulb': 'econo fluorescent bulb',
'florecent bulb holder': 'fluorescent bulb holder',
'light fictures': 'light fixtures',
'lihonia 4 led work light': 'lithonia 4 led work light',
'interrior frnch doors': 'interior french doors',
'hamptom bay cusion': 'hampton bay cushion',
'wndows': 'windows',
'porcalain thinset': 'porcelain thinset',
'versabon 50lb': 'versabond 50 lb',
'table for outsde': 'table for outside',
'hoinda gas edger': 'honda gas edger',
'installing sockets for flor': 'installing sockets for floor',
'laguna porcelin tile': 'laguna porcelain tile',
'showe heads in oil rubbed bronze': 'shower heads in oil rubbed bronze',
'chase lounge cushions': 'chaise lounge cushions',
'electric detector in simming pool water': 'electric detector in swimming pool water',
'elongagated toilet seat': 'elongated toilet seat',
'towbehind lawn spreaders': 'tow behind lawn spreaders',
'cable poter': 'cable porter',
'fraiming nailer electric': 'framing nailer electric',
'12 x 12 porcelian floor and wall tile': '12 x 12 porcelain floor and wall tile',
'marrazi': 'marazzi',
'range hoodu': 'range hood',
'whirpool range': 'whirlpool range',
'subway title 3 x 6': 'subway tile 3 x 6',
'untique stone': 'antique stone',
'post sleeveee': 'post sleeve',
'dinning chair seats': 'dining chair seats',
'christmas lights icicle colerful': 'christmas lights icicle colorful',
'colpay garage door molding': 'clopay garage door molding',
'light for public ligthining': 'light for public lightning',
'slate timberland shingle': 'slate timberline shingle',
'cicular saw blad': 'circular saw blade',
'varbide 7 1/4 circular saw blade': 'carbide 7 1/4 circular saw blade',
'10 flourescent bulbs': '10 fluorescent bulbs',
'kids outside furnature': 'kids outside furniture',
'whirpool gas range': 'whirlpool gas range',
'starter fertillzer': 'starter fertilizer',
'toro snowerblower light kit': 'toro snowblower light kit',
'High Wheel String Trimer': 'High Wheel String Trimmer',
'insided house door': 'inside house door',
'3 1/2 non-mortison hinges satin finish': '3 1/2 non-mortise hinges satin finish',
'miracle grow garden soil': 'miracle gro garden soil',
'miracle grow spray dispensers': 'miracle gro spray dispensers',
'alure flooring black oak': 'allure flooring black oak',
'sweeping atatchment for weed wacker': 'sweeping attachment for weed wacker',
'retangle bathroom sinks': 'rectangular bathroom sinks',
'underthe cabinet microwaves': 'under the cabinet microwaves',
'24 inch lover doors': '24 inch louvered doors',
'window drip egedg': 'window drip edge',
'rechargable portable air compressor': 'rechargeable portable air compressor',
'birkmann 5 burner': 'brinkmann 5 burner',
'whirlpool gasnstove self cleaning oven': 'whirlpool gas stove self cleaning oven',
'havc brush': 'hvac brush',
'discharge hose 1.5 inces': 'discharge hose 1.5 inches',
'6 ft laminite countertop': '6 ft laminate countertop',
'pool vaccum': 'pool vacuum',
'1/2 in.x 1/2 in. thread albow male to male': '1/2 in.x 1/2 in. threaded elbow male to male',
'sofet': 'soffit',
'sliding patio doort': 'sliding patio door',
'30inch flourescent tubes': '30 inch fluorescent tubes',
'phillips light bulbs': 'philips light bulbs',
'stainless steel sinl': 'stainless steel sink',
'burgular bars for front porch': 'burglar bars for front porch',
'oach lights': 'coach lights',
'2 in lnsulated bushings': '2 in insulated bushings',
'motion lught': 'motion light',
'residental light sensor security lights': 'residential light sensor security lights',
'vertical blind accsesories': 'vertical blind accessories',
'1/2 in ree bar': '1/2 in rebar',
'cloths rod and shelf brackets': 'clothes rod and shelf brackets',
'fire rated buildng materials': 'fire rated building materials',
'hot point water filer': 'hotpoint water filter',
'bathroom cabinet without fermaldehyde': 'bathroom cabinet without formaldehyde',
'9.6 bvolt': '9.6 volt',
'rustoleum bright coach metallic': 'rustoleum bright coat metallic',
'stone effect sante cecilia top': 'stone effects santa cecilia top',
'suspanded ceiling': 'suspended ceiling',
'4x8 plywood pressure treeted': '4x8 plywood pressure treated',
'acess panel': 'access panel',
'genie excellartor garage door opener': 'genie excelerator garage door opener',
'ge dish washer with 46 dba rating': 'ge dishwasher with 46 dba rating',
'wood and concret stain': 'wood and concrete stain',
'8 foot flour sent': '8 foot fluorescent',
'infared grills': 'infrared grills',
'wirless interconnected smoke dedector': 'wireless interconnected smoke detector',
'luever': 'leuver',
'3 in roung head bolt': '3 in round head bolt',
'rachet': 'ratchet',
'rigid 12 volt': 'ridgid 12 volt',
'sharkbit': 'sharkbite',
'hamiltton collectin': 'hamilton collection',
'kitchen aide wine and beverage refrigerator': 'kitchenaid wine and beverage refrigerator',
'paint markers burgondy color': 'paint markers burgundy color',
'glass washer with sucktion cups': 'glass washer with suction cups',
'andersor doors': 'anderson doors',
'hickory cabinett': 'hickory cabinet',
'repacement can type light bulbs': 'replacement can type light bulbs',
'ceeling patio shades': 'ceiling patio shades',
'white vainty 8 faucet': 'white vanity 8 faucet',
'daylight florisant bulb 36inch': 'daylight fluorescent bulb 36 inch',
'contact paoer': 'contact paper',
'air bathtubes': 'air bathtubs',
'cushions for wecker furniture': 'cushions for wicker furniture',
'galvinized poles 20long': 'galvanized poles 20 long',
'siegel light pendent': 'siegel light pendant',
'spaonges': 'sponges',
'extorior shatters': 'exterior shutters',
'led blubs': 'led bulbs',
'4 inch back flow prenter': '4 inch backflow preventer',
'silding closet doors track': 'sliding closet doors track',
'10000 btu windowair condiioner': '10000 btu window air conditioner',
'sewer pipe hoider': 'sewer pipe holder',
'vinal blind paint': 'vinyl blind paint',
'fuacet': 'faucet',
'picinic tables': 'picnic tables',
'all in one topmount kraus sinks': 'all in one top mount kraus sinks',
'solar post lmapy': 'solar post lamp',
'transormations': 'transformations',
'daltiles sandy beach': 'daltile sandy beach',
'wallmount indoor lights with plug': 'wall mounted indoor lights with plug',
'kennal kit': 'kennel kit',
'46 high output grow florescent bulb': '46 high output grow fluorescent bulb',
'frost fee freezers': 'frost free freezers',
'stainles steel door handle': 'stainless steel door handle',
'combo drill makita 20v': 'combi drill makita 20v',
'shop vacumm': 'shop vacuum',
'primer for led paint': 'primer for lead paint',
'outdoor gas fiepits': 'outdoor gas firepits',
'hallway pendendant lighting': 'hallway pendant lighting',
'chesapeke oak flooring': 'chesapeake oak flooring',
'ryobi multi tool acccessories': 'ryobi multi tool accessories',
'ryobi raidos': 'ryobi radios',
'milwaukee skill saw': 'milwaukee skil saw',
'ligh chrismas hanging tree': 'light christmas hanging tree',
'galvinized screws': 'galvanized screws',
'led circuline bulbs': 'led circline bulbs',
'kholer elongated toilet seat': 'kohler elongated toilet seat',
'tolet seats': 'toilet seats',
'ock blade knife piece 3': 'lock blade knife piece 3',
'portable airconditioner': 'portable air conditioner',
'window aircondition': 'window air conditioner',
'36 vx 72 commercial outdoor mats': '36 x 72 commercial outdoor mats',
'runner commerical': 'runner commercial',
'montagna dappy gray': 'montagna dapple gray',
'soil temperture test kit': 'soil temperature test kit',
'basement tolet': 'basement toilet',
'32 door threshhold': '32 door threshold',
'hampton bay oak bast cabinets': 'hampton bay oak base cabinets',
'charbroil parts': 'char broil parts',
'qucikie mop': 'quickie mop',
'concret anchor bolts': 'concrete anchor bolts',
'24 whtie storage cabinet': '24 white storage cabinet',
'door handle deabolt kit': 'door handle deadbolt kit',
'ge profile 30 inch charcoal folters': 'ge profile 30 inch charcoal filters',
'49 inch napolian vanity top': '49 inch napoleon vanity top',
'4in pvc franco cuppling': '4in pvc fernco coupling',
'graveless gravaless sewer pipe': 'graveless graveless sewer pipe',
'shower fllor': 'shower floor',
'riverera screen doors': 'riviera screen doors',
'animal deterent': 'animal deterrent',
'woodpeckers repellant': 'woodpeckers repellent',
'wood buring insert 200-250': 'wood burning insert 200-250',
'spectrazide ant': 'spectracide ant',
'gas grill accesories': 'gas grill accessories',
'elecronic insect repeller': 'electronic insect repeller',
'slyvanna motion nite light': 'sylvania motion nite light',
'4 in pvs end cap': '4 in pvc end cap',
'delta portor shower and tub trim': 'delta porter shower and tub trim',
'replacment mini bulbs': 'replacement mini bulbs',
'braxilian cherry laminate': 'brazilian cherry laminate',
'15 amp tampe resistant outlets': '15 amp tamper resistant outlets',
'hydraulic jack renat': 'hydraulic jack rental',
'32 x 32 shower baser': '32 x 32 shower base',
'electronic bed bug repellant': 'electronic bed bug repellent',
'ridgid auger': 'rigid auger',
'2000 psi force nozzzle': '2000 psi force nozzle',
'25 height beveragecooler': '25 height beverage cooler',
'anderson windows 400 seriesimpact resistant': 'andersen windows 400 series impact resistant',
'drill 20 lithium battery': 'drill 20v lithium battery',
'extertal air vent cover': 'external air vent cover',
'resin shesd': 'resin sheds',
'8x8x4 conctete block': '8x8x4 concrete block',
'tun faucet spout': 'tub faucet spout',
'continuos curtain rods': 'continuous curtain rods',
'upholstry cleaner': 'upholstery cleaner',
'ureka vaccuum': 'eureka vacuum',
'30 towel rods brushed nicol': '30 towel rods brushed nickel',
'1/2 gal thermos': '1/2 gallon thermos',
'unbralla fabric top only': 'umbrella fabric top only',
'outdoor cieling fans': 'outdoor ceiling fans',
'20 amps cros hinghs breaker': '20 amps cross highs breaker',
'mixing tubn': 'mixing tub',
'gfi circuit breaker': 'gfci circuit breaker',
'wrought iuron fence panels': 'wrought iron fence panels',
'ac air vent sleave': 'ac air vent sleeve',
'air ventalation deflector': 'air ventilation deflector',
'buddahs hand tree': 'buddha\'s hand tree',
'lawm mowers': 'lawn mowers',
'asathbula 7 piece': 'ashtabula 7 piece',
'recessed lightjs': 'recessed lights',
'hing pin door dtop': 'hinge pin door stop',
'elerical outlets plates': 'electrical outlets plates',
'bed tool boc': 'bed tool box',
'16 inch fabn': '16 inch fan',
'battery poerated motion sensor': 'battery operated motion sensor',
'grqss': 'grass',
'troy build trimmer extension': 'troy bilt trimmer extension',
'mansonry impact bit': 'masonry impact bit',
'high output basebord': 'high output baseboard',
'shower door sealparts': 'shower door seal parts',
'12 inch hight wall cabinet': '12 inch height wall cabinet',
'light s for sno throwers': 'lights for snow throwers',
'ceiling medallians': 'ceiling medallions',
'medalion': 'medallion',
'everbilt sloted': 'everbilt slotted',
'transparant redwood stain': 'transparent redwood stain',
'black and decker scub buster extreme': 'black and decker scrub buster extreme',
'mobilehome siding': 'mobile home siding',
'shutter screwws': 'shutter screws',
'hampton pation set with firepit': 'hampton patio set with firepit',
'industreial wire': 'industrial wire',
'vegtable seeds': 'vegetable seeds',
'masterpeice 72': 'masterpiece 72',
'5/4 lumbe': '5/4 lumber',
'dawn to dusk lig': 'dawn to dusk light',
'dusk to dawn motion sensoroutdoor lighting fixtures': 'dusk to dawn motion sensor outdoor lighting fixtures',
'cordless sweeperr': 'cordless sweeper',
'mill valley colle': 'mill valley college',
'outdoorstorage bin': 'outdoor storage bin',
'haging wire': 'hanging wire',
'4 in white recessed haol baffle in soft white': '4 in white recessed led baffle in soft white',
'11 1/2x25 1/2 white aluminun': '11 1/2 x 25 1/2 white aluminum',
'saratoga hickorya': 'saratoga hickory',
'surface gringer': 'surface grinder',
'kidie co2': 'kidde co2',
'batterys and charger kits': 'batteries and charger kits',
'nutru ninja': 'nutri ninja',
'23.5 shower door nickle': '23.5 shower door nickel',
'glass panel retiner': 'glass panel retainer',
'12v replacement blubs': '12v replacement bulbs',
'martha steward': 'martha stewart',
'1 1/2inchbrasswalltube18 inch': '1 1/2 inch brass wall tube 18 inch',
'brown color scheem': 'brown color scheme',
'spiral latters': 'spiral letters',
'24 incyh range': '24 inch range',
'8x8 ezup canopie cover': '8x8 ez up canopy cover',
'kitcheen door blind': 'kitchen door blind',
'flourescent balast 120-2/32is': 'fluorescent ballast 120-2/32is',
'vinyl lattiace': 'vinyl lattice',
'1/4 28 threadded connector': '1/4 28 threaded connector',
'kitchaid 3 burner': 'kitchenaid 3 burner',
'10 condiut pvc': '10 conduit pvc',
'WEBER GRILL GENIS 310': 'WEBER GRILL GENESIS 310',
'wall mount tub fauet moen': 'wall mount tub faucet moen',
'sower cleaner': 'shower cleaner',
'batteryfor alarm system': 'battery for alarm system',
'bed gugs': 'bed bugs',
'show the pric of washer and dryer': 'show the price of washer and dryer',
'washer electic dryer': 'washer electric dryer',
'ho hub couplings': 'no hub couplings',
'battey string trimmers': 'battery string trimmers',
'3/4 in. wide quarteround': '3/4 in. wide quarter round',
'ac dip pans': 'ac drip pans',
'rutland wood stove termometer': 'rutland wood stove thermometer',
'outdoor daucets': 'outdoor faucets',
'badless vacuum cleaners': 'bagless vacuum cleaners',
'dewalt 20 volt xr hamer': 'dewalt 20 volt xr hammer',
'dewalt drillimpact tool 20 volt xr': 'dewalt drill impact tool 20 volt xr',
'martha steward bath mirror': 'martha stewart bath mirror',
'infared thermometer': 'infrared thermometer',
'millwaukee 1/2 ele.c drill': 'milwaukee 1/2 elec drill',
'25 watt 4 foot flourescent': '25 watt 4 foot fluorescent',
'boscj bit': 'bosch bit',
'barbque grills': 'barbecue grills',
'brinkman grill burner': 'brinkmann grill burner',
'malbu replacement led light bubles': 'malibu replacement led light bulbs',
'natural stone tiele': 'natural stone tile',
'stone vaneer': 'stone veneer',
'stone venner sequia': 'stone veneer sequoia',
'ceiling fan replacement clades': 'ceiling fan replacement blades',
'transformet for flurescent tube lights': 'transformer for fluorescent tube lights',
'refrigerator frenchdoor': 'refrigerator french door',
'flourescent paint': 'fluorescent paint',
'marking baint': 'marking paint',
'mirrir hanger': 'mirror hanger',
'chrisymas tree bags': 'christmas tree bags',
'comercial food processor': 'commercial food processor',
'picture haning kitpicture hanging kit': 'picture hanging kit picture hanging kit',
'bathroom vanity cabinetwithouttops': 'bathroom vanity cabinets without tops',
'amcrest survelliance systems': 'amcrest surveillance systems',
'30 inch refigrator': '30 inch refrigerator',
'chain saw eletric': 'chainsaw electric',
'power dprayer': 'power sprayer',
'douglas fur fake christmas trees': 'douglas fir fake christmas trees',
'brinkman grill': 'brinkmann grill',
'dual switch dimer': 'dual switch dimmer',
'Ortho Wed B Gone max': 'Ortho Weed B Gon max',
'ortho weed be gone': 'ortho weed b gon',
'4ft flourescent bulb t8': '4ft fluorescent bulb t8',
'18 volt 1/2 roter hammer': '18 volt 1/2 roto hammer',
'cabinents with drawers': 'cabinets with drawers',
'7 mil trash bgs': '7 mil trash bags',
'1/2 ntp to 1/2': '1/2 npt to 1/2',
'3/8 rachert set': '3/8 ratchet set',
'hunter shower eshaust fan with light': 'hunter shower exhaust fan with light',
'vanity in mahogany mirros': 'vanity in mahogany mirrors',
'hasmmock bed': 'hammock bed',
'composit fencing': 'composite fencing',
'post insurts': 'post inserts',
'3500 psi pressue washer': '3500 psi pressure washer',
'idylus air purifier': 'idylis air purifier',
'garden solenoide valves': 'garden solenoid valves',
'window plastic instulation': 'window plastic insulation',
'engineered wood floorcleaners': 'engineered wood floor cleaners',
'parquee flooring': 'parquet flooring',
'dermal saw max ultra': 'dremel saw max ultra',
'external structual connector screw': 'external structural connector screw',
'tv shelv': 'tv shelf',
'kithen cabinets 18 white': 'kitchen cabinets 18 white',
'1 1/2 couplingg': '1 1/2 coupling',
'porceline faucet handle': 'porcelain faucet handle',
'duplex outlet and ubs charger': 'duplex outlet and usb charger',
'1/4 quarter round cherries jublilee': '1/4 quarter round cherries jubilee',
'lg hausys viaterra': 'lg hausys viatera',
'bear semi transparent cedar stain': 'behr semi transparent cedar stain',
'27 mivrowave': '27 microwave',
'gardinias': 'gardenias',
'ull spectrum plant light': 'full spectrum plant light',
'942196brinkmann 2 burner': '942196 brinkmann 2 burner',
'gargage storage ideas': 'garage storage ideas',
'outside horizontal storage sheds': 'outdoor horizontal storage sheds',
'bouganvilla': 'bougainvillea',
'led recressed lighting': 'led recessed lighting',
'3 x3 marle tile': '3x3 marble tile',
'concrete saw dewall': 'concrete saw dewalt',
'replacement glass for pellet stive': 'replacement glass for pellet stove',
'porcelin tile black pencil tile': 'porcelain tile black pencil tile',
'smoke dectectors': 'smoke detectors',
'humidifier fulters': 'humidifier filters',
'3/4 in. pvc assesories': '3/4 in. pvc accessories',
'12 inch sower head': '12 inch shower head',
'22 mm impact ocket': '22mm impact socket',
'garvanized wood screws': 'galvanized wood screws',
'interlocking rubbber floor mats': 'interlocking rubber floor mats',
'Hose end nozzel': 'Hose end nozzle',
'led energy efficient kitchen lites': 'led energy efficient kitchen lights',
'barn syslet door': 'barn style door',
'rat or mice poision': 'rat or mice poison',
'led ressed deameable lights': 'led recessed dimmable lights',
'prelit tree mutli': 'pre lit tree multi',
'sodering iron': 'soldering iron',
'tub suround': 'tub surround',
'fireplace screen assessories': 'fireplace screen accessories',
'acrilic white paint': 'acrylic white paint',
'gibraltor locking': 'gibraltar locking',
'air conditioner sideays': 'air conditioner sideways',
'white inyrtior paint': 'white interior paint',
'100 watt candlebra': '100 watt candelabra',
'llhampton bay patio rocker': 'hampton bay patio rocker',
'lock brushed nicke;': 'lock brushed nickel;',
'structered media': 'structured media',
'summit 24 inch ss gaqs range': 'summit 24 inch ss gas range',
'ryobl battery': 'ryobi battery',
'replacement carbrator for robyi': 'replacement carburetor for ryobi',
'balist': 'ballast',
'pressuer washer': 'pressure washer',
'22 storage shelve': '22 storage shelf',
'32\' strorm door': '32\' storm door',
'hazardous locationlight fixture globe': 'hazardous location light fixture globe',
'john deer bagger': 'john deere bagger',
'ridinng lawn mowers mulching': 'riding lawn mowers mulching',
'1/2 fpt x 1/2 inch pex': '1/2 npt x 1/2 inch pex',
'2 kindorff straps': '2 kindorf straps',
'telemechanic square d': 'telemecanique square d',
'thresh hold': 'threshold',
'24x24 framless recessed mount mirrored medicine': '24x24 frameless recessed mount mirrored medicine',
'600 connector cylander': '600 connector cylinder',
'well pump submerciable': 'well pump submersible',
'security gate pannel': 'security gate panel',
'1/4-20 jamb nuts': '1/4-20 jam nuts',
'american standard flush valvu': 'american standard flush valve',
'stove adopter': 'stove adapter',
'kitchenaide dishwasher': 'kitchenaid dishwasher',
'roofing leadders': 'roofing ladders',
'heath zenity 180 security light': 'heath zenith 180 security light',
'solar powerd lights': 'solar powered lights',
'24 white walloven': '24 white wall oven',
'kitchen aide mixer': 'kitchenaid mixer',
'10 in w 30 in l inetrior vent': '10 in w 30 in l interior vent',
'co smoke detector kiddie': 'co smoke detector kidde',
'vacum aa bag 58236c': 'vacuum aa bag 58236c',
'sealant for sideing': 'sealant for siding',
'come along and chaincome along and chain': 'come along and chain come along and chain',
'wall paper bprder': 'wallpaper border',
'cararra tile': 'carrara tile',
'14 gauge strranded wire': '14 gauge stranded wire',
'30 gal electirc water heater': '30 gal electric water heater',
'guarter round tile': 'quarter round tile',
'summit gril': 'summit grill',
'gavanized pipe 20 feet': 'galvanized pipe 20 feet',
'melamine sheliving': 'melamine shelving',
'composite fiscia board': 'composite fascia board',
'spunge mop refill': 'sponge mop refill',
'wall mount outside motion dector': 'wall mount outdoor motion detector',
'bisquit tub refinish kit': 'biscuit tub refinish kit',
'patternn paint rollers': 'pattern paint rollers',
'built in wall nitch': 'built in wall niche',
'ironboard built in': 'iron board built in',
'behr melrot': 'behr merlot',
'led shoplightmakita light': 'led shop light makita light',
'armazone': 'amazon',
'soild 6 panel interior door': 'solid 6 panel interior door',
'dishs for 8': 'dishes for 8',
'1 1/4 steel ppes': '1 1/4 steel pipes',
'pull out drw': 'pull out draw',
'swffer mop': 'swiffer mop',
'milwaukee m18 tootls': 'milwaukee m18 tools',
'bronzw phone wall jack cover': 'bronze phone wall jack cover',
'flourscent lights size 18x24': 'fluorescent lights size 18x24',
'berber carpeting destiny doeskin': 'berber carpet destiny doeskin',
'spring heavy dut': 'spring heavy duty',
'2 in pvc pipe incresers': '2 in pvc pipe increasers',
'lifetime rouind table': 'lifetime round table',
'16x26 recesssed medicine cabinets': '16x26 recessed medicine cabinets',
'rolling barn dorr hardware': 'rolling barn door hardware',
'huricane panel caps': 'hurricane panel caps',
'73 inch anderson patio screen doors': '73 inch andersen patio screen doors',
'barbque grill temperature guage': 'barbecue grill temperature gauge',
'bath tub shower repair lit': 'bathtub shower repair kit',
'entery door sidelights': 'entry door sidelights',
'5 burnerner brikman gas grill': '5 burner brinkmann gas grill',
'battub floor mat': 'bathtub floor mat',
'outlet wallplate with cover': 'outlet wall plate with cover',
'fungacide': 'fungicide',
'tuband tile latex caulk': 'tub and tile latex caulk',
'natural gas barbeque': 'natural gas barbecue',
'hallogen bulb flood': 'halogen bulb flood',
'roudulf': 'rudolf',
'cellular shade 23.75x37': 'cellular shade 23.75x 37',
'wyndham vanities with no tops': 'wyndham vanities without tops',
'frigidare gas range': 'frigidaire gas range',
'frigidare refrigerator': 'frigidaire refrigerator',
'dishwasher moiunting kit': 'dishwasher mounting kit',
'black refrigeratore': 'black refrigerator',
'barcello estates light fi': 'bercello estates light fi',
'kohler ch730 maintance kits': 'kohler ch730 maintenance kits',
'phillips led slimline a19': 'philips led slimline a19',
'asburn mahogany medicine cabinate': 'ashburn mahogany medicine cabinet',
'stove top replacement patr': 'stove top replacement part',
'hampton bay pendent light parts': 'hampton bay pendant light parts',
'wall mountreading light': 'wall mount reading light',
'heat on malamine tape': 'heat on melamine tape',
'vinal plank selection': 'vinyl plank selection',
'marble qwhite': 'marble white',
'reheem performance 75 gal water heater': 'rheem performance 75 gal water heater',
'cover for a double barrow grill': 'cover for a double barrel grill',
'water taste kits': 'water test kits',
'roybi gas trimmer repair kit': 'ryobi gas trimmer repair kit',
'masonary dril bits': 'masonry drill bits',
'bath and shower facet set': 'bath and shower faucet set',
'sanding sponce': 'sanding sponge',
'silestone sammples': 'silestone samples',
'ge mwr filter': 'ge mwf filter',
'rectangele garbage can': 'rectangle garbage can',
'light podt sensor': 'light post sensor',
'honewell wireless doorbell': 'honeywell wireless doorbell',
'vertical door slide mechanis': 'vertical door slide mechanism',
'2 inch bye 6 inch thick board': '2 inch by 6 inch thick board',
'28x80 contl splt rh': '28x80 control split rh',
'doors exterior with top windo': 'doors exterior with top window',
'water filter for vanitys': 'water filter for vanities',
'hampton bay geogian wall plates aged bronze': 'hampton bay georgian wall plates aged bronze',
'18 wat let lamps': '18 watt led lamps',
'qstatic cling window film': 'static cling window film',
'eletric pole hedge clippers': 'electric pole hedge clippers',
'moen voss lightin': 'moen voss lighting',
'dreamline showeruni door': 'dreamline shower door',
'dewaqlt air nailers': 'dewalt air nailers',
'hex drill chcuck': 'hex drill chuck',
'vinal siding per box': 'vinyl siding per box',
'verticle blind': 'vertical blind',
'chome framed mirror': 'chrome framed mirror',
'b onnet': 'bonnet',
'dowel sprial': 'dowel spiral',
'deck tdiles': 'deck tiles',
'driveing bits': 'driving bits',
'water putifiers': 'water purifiers',
'clyvus': 'clivus',
'old style nailshand forgednails': 'old style nails hand forged nails',
'grohe essencekitchen faucet': 'grohe essence kitchen faucet',
'femle end hose repair': 'female end hose repair',
'garden hose reair kits': 'garden hose repair kits',
'bathroom facets': 'bathroom faucets',
'kitchenaid refrigerator bottom frrezer': 'kitchenaid refrigerator bottom freezer',
'chrome/polished brass 2-handle 4-in centerset bathroom fauc': 'chrome/polished brass 2-handle 4-in centerset bathroom faucet',
'spackilng knife': 'spackling knife',
'cadelabra light bulbs led': 'candelabra light bulbs led',
'roller bracker for frameless shower doors': 'roller bracket for frameless shower doors',
'morola tile metro penny': 'merola tile metro penny',
'48 inchled tube': '48 inch led tube',
'corner sorage': 'corner storage',
'glaciar bay crystal shower': 'glacier bay crystal shower',
'tosco ivory tile': 'tosca ivory tile',
'elecric screw driver batteries': 'electric screwdriver batteries',
'mobilehome wall paint': 'mobile home wall paint',
'chainsaw rplacement chains': 'chainsaw replacement chains',
'electric guage cable': 'electric gauge cable',
'f15 t5 florescent': 'f15 t5 fluorescent',
'sprinkler conroller': 'sprinkler controller',
'wireless light sitch': 'wireless light switch',
'16x16x60boxes for moving': '16x16x60 boxes for moving',
'engeenered wood': 'engineered wood',
'frigidare microwave': 'frigidaire microwave',
'nals for subfloor': 'nails for subfloor',
'verathane': 'varathane',
'remote controlle light dimmer': 'remote controlled light dimmer',
'koehler shower door': 'kohler shower door',
'burgluar bar tool': 'burglar bar tool',
'greem roofing shingles': 'green roofing shingles',
'milwoki circular saw': 'milwaukee circular saw',
'tub faucets bronza': 'tub faucets bronze',
'bathtubdoor towel racks': 'bathtub door towel racks',
'ac exhaust extention': 'ac exhaust extension',
'outside deck boards composit': 'outside deck boards composite',
'4inch ligh junction box': '4 inch light junction box',
'gardenn containers': 'garden containers',
'plant continers': 'plant containers',
'3 paint bbrush': '3 paint brush',
'26 in woodsaddle stool': '26 in wood saddle stool',
'adhensive with nozzle': 'adhesive with nozzle',
'swanstone kitchen sink accesories': 'swanstone kitchen sink accessories',
'pvc to corragated connector': 'pvc to corrugated connector',
'unsanded grout bisquit': 'unsanded grout biscuit',
'spray paint rust-oleum gray': 'spray paint rustoleum gray',
'brushes drils': 'brushed drills',
'indoor mounting tpe': 'indoor mounting tape',
'indoor grow light blubs': 'indoor grow light bulbs',
'thinset morter': 'thin set mortar',
'flourescent g25 60watt': 'fluorescent g25 60 watt',
'diatemaceous earth': 'diatomaceous earth',
'23\' biview surface mount med cab chestnut': '23\' bi view surface mount med cab chestnut',
'72 hour carpt': '72 hour carpet',
'2 \' galvanise street 90': '2 \' galvanized street 90',
'maytab bravos': 'maytag bravos',
'600w incandecent toggle dimmer': '600w incandescent toggle dimmer',
'galvanized wire 10 guage': 'galvanized wire 10 gauge',
'assemble hight 17 inches': 'assembled height 17 inches',
'pvc t coulpler': 'pvc t coupler',
'water heatere drain pan': 'water heater drain pan',
'faucet steam washers': 'faucet stem washers',
'heat window filtm': 'heat window film',
'dewalt circlular saw blades': 'dewalt circular saw blades',
'5plinth block': 'plinth block',
'french pation doors with sidepanels': 'french patio doors with side panels',
'30 unfinish filler': '30 unfinished filler',
'home depot in cambrige': 'home depot in cambridge',
'faucet siphon hose connecter': 'faucet siphon hose connector',
'black out doors spray paint': 'black outdoor spray paint',
'anderson storm door full view easy install': 'andersen storm door full view easy install',
'ice marker water kits': 'ice maker water kits',
'adhesive magnetized roll': 'adhesive magnetic roll',
'metal kkitchen cabines': 'metal kitchen cabinets',
'2\' x 1 1/2 reducing busing thread': '2\' x 1 1/2 reducing bushing threaded',
'abs rambit pipe saver': 'abs rambut pipe saver',
'33 in w x 18 icnh depth vanity': '33 in w x 18 inch depth vanity',
'built in landry shelving': 'built in laundry shelving',
'grey rubbermaid trash barrells': 'grey rubbermaid trash barrels',
'sawall blades': 'sawzall blades',
'9v battery ackup': '9v battery backup',
'1/2 in. fip x 7/16 in. or 1/2 in. slip joint angle stop valv': '1/2 in. fip x 7/16 in. or 1/2 in. slip joint angle stop valve',
'peir block': 'pier block',
'under ceiling garag storage': 'under ceiling garage storage',
'stone effects backsplash cool fushion': 'stone effects backsplash cool fusion',
'desoldering vacum pump': 'desoldering vacuum pump',
'elrctric welders': 'electric welders',
'unfinushed kitchen cabinets': 'unfinished kitchen cabinets',
'3 pole range reciptical': '3 pole range receptacle',
'sink cutting oard': 'sink cutting board',
'steel tubing falanges': 'steel tubing flanges',
'outdoor unskid tiles': 'outdoor non skid tiles',
'6 round headlag bolt': '6 round head lag bolt',
'cyprees fence': 'cypress fence',
'75 qrt cooler with wheels': '75 quart cooler with wheels',
'buit in themostat': 'built in thermostat',
'speacalty bit set': 'specialty bit set',
'curtain rod classic sqaure finial': 'curtain rod classic square finial',
'silk poinsetia': 'silk poinsettia',
'1 1/4 pvcsch 80': '1 1/4 pvc sch 80',
'grill ousite door': 'grill outside door',
'lumionaire': 'luminaire',
'adienne bathroom vanity light': 'adrienne bathroom vanity light',
'chashing led lights': 'chasing led lights',
'24 inch vessal tops': '24 inch vessel tops',
'co2 detector kiddie': 'co2 detector kidde',
'white glazed 4 tilw': 'white glazed 4 tile',
'wood lattace': 'wood lattice',
'premaid stair railing': 'premade stair railing',
'3 function double walll switch': '3 function double wall switch',
'koehler shower faucet with spray': 'kohler shower faucet with spray',
'askley electric fireplace': 'ashley electric fireplace',
'blind for paladian': 'blind for paladin',
'regancy railin': 'regency railing',
'weatherside purit': 'weatherside purity',
'vent a hood dampr': 'vent a hood damper',
'light tropper 2x4': 'light troffer 2x4',
'30 amp generater receptical': '30 amp generator receptacle',
'prefab wood gate panals': 'prefab wood gate panels',
'floating corner shelfing': 'floating corner shelving',
'fridgidaire dehumidifier': 'frigidaire dehumidifier',
'pegs for cabinent shelves': 'pegs for cabinet shelves',
'100 amp to 200a lit': '100 amp to 200 a lot',
'decorative metal sceen': 'decorative metal screen',
'lacross weather pro center': 'lacrosse weather pro center',
'behr flat white marque': 'behr flat white marquee',
'high output floresant': 'high output fluorescent',
'behr hawian paint': 'behr hawaiian paint',
'pressure vaccuum breaker o-ring': 'pressure vacuum breaker o-ring',
'psint gun': 'paint gun',
'wine coller': 'wine cooler',
'rug ruunners': 'rug runners',
'clock control for fridgidare gas stove': 'clock control for frigidaire gas stove',
'outlet expsnsion surge protector': 'outlet expansion surge protector',
'rigid pipe threader': 'ridgid pipe threader',
'electical box': 'electrical box',
'insect granuels': 'insect granules',
'compsit outside corner': 'composite outside corner',
'cabinet kitchen ligth': 'cabinet kitchen light',
'dewalt ratchet srewdriver': 'dewalt ratchet screwdriver',
'18.5 outside chair cushiobs': '18.5 outside chair cushions',
'fenching and gate latches': 'fencing and gate latches',
'heater for refrigertor': 'heater for refrigerator',
'motion detect indoor': 'motion detector indoor',
'refrigerater french doors ge brand': 'refrigerator french doors ge brand',
'tiki tourches': 'tiki torches',
'gren house kits': 'greenhouse kits',
'5000 btu aircondition': '5000 btu air conditioner',
'airator dishwasher': 'aerator dishwasher',
'2x6 metal brakets': '2x6 metal brackets',
'weldn 3': 'weldon 3',
'ceiling paint pray': 'ceiling paint spray',
'flourescent fixture metal parts': 'fluorescent fixture metal parts',
'natural hickery kitchen cabinets': 'natural hickory kitchen cabinets',
'kitchen aide dishwasher': 'kitchenaid dishwasher',
'led track lightning systems': 'led track lighting systems',
'duplex receptacle nickle': 'duplex receptacle nickel',
'12 foot ceadar': '12 foot cedar',
'faux wood shade 100 jnches': 'faux wood shade 100 inches',
'contracto0r hose': 'contractor hose',
'lspacers for toilet bowl': 'spacers for toilet bowl',
'aftificial prelit christmas trees': 'artificial prelit christmas trees',
'paint colores by rooms': 'paint colors by rooms',
'warm whit led bulb': 'warm white led bulb',
'clamps for unistruct': 'clamps for unistrut',
'kitchen trviso price phister': 'kitchen treviso price pfister',
'10guage copper wire 3 stand': '10 gauge copper wire 3 stand',
'deep frezer with glass cover': 'deep freezer with glass cover',
'powder clorine shock treatment': 'powder chlorine shock treatment',
'galvanaized can': 'galvanized can',
'prebent aluminum facia': 'prebent aluminum fascia',
'vinyl scrapper for jack hammer': 'vinyl scraper for jack hammer',
'dwaft outside plants': 'dwarf outside plants',
'tilebath walls small': 'tile bath walls small',
'2 ton aircondition': '2 ton air conditioner',
'martha stewart metalic paint gallon': 'martha stewart metallic paint gallon',
'schilage electronic deadbolts locks': 'schlage electronic deadbolts locks',
'60x65shower doors': '60x65 shower doors',
'tile slide cuter': 'tile slide cutter',
'eagle peak hoickory': 'eagle peak hickory',
'gas black range worldpool': 'gas black range whirlpool',
'trigger makita skillsaw': 'trigger makita skil saw',
'hardi lap hanger': 'hardie lap hanger',
'master flow insolated duct wrap': 'master flow insulated duct wrap',
'replacment stove knobs': 'replacement stove knobs',
'outdoor alrm': 'outdoor alarm',
'wireless outdoor thermom': 'wireless outdoor thermometer',
'faun paint': 'fawn paint',
'wireless security caamera': 'wireless security camera',
'fiet electric led gu10': 'feit electric led gu10',
'stair unners': 'stair runners',
'stainstess steel spray paint': 'stainless steel spray paint',
'mount blanv': 'mont blanc',
'riobi power tool combo': 'ryobi power tool combo',
'24 sydey collection': '24 sydney collection',
'air compresser': 'air compressor',
'no tresspassing signs': 'no trespassing signs',
'flexable 6 inch': 'flexible 6 inch',
'wall beveled framelessmirror': 'wall beveled frameless mirror',
'slidein range bisque': 'slide in range bisque',
'router templit kits letters': 'router template kits letters',
'american sandard 1660.225,': 'american standard 1660.225,',
'onyx sand porcelian': 'onyx sand porcelain',
'watherproof electrical boxes': 'weatherproof electrical boxes',
'carpet remmnant': 'carpet remnant',
'8\' sigle wall gal pipe': '8\' single wall galv pipe',
'byfold hinges': 'bi fold hinges',
'terra cota quarry stones': 'terracotta quarry stones',
'rustolem appliance touch up paint': 'rustoleum appliance touch up paint',
'rain nickle': 'rain nickel',
'whirlpool light bulb part 8206232': 'whirlpool light bulb part 8206232a',
'Vigaro fall fertilizer': 'Vigoro fall fertilizer',
'pneumatic cynlinder': 'pneumatic cylinder',
'20 ft electical romex': '20 ft electrical romex',
'medicine cabinets recessable black': 'medicine cabinets recessed black',
'krass 30 inch kitchen sink': 'kraus 30 inch kitchen sink',
'stainless steel grat': 'stainless steel grate',
'suncort 8\' duct fans': 'suncourt 8\' duct fans',
'nutmag mirrors': 'nutmeg mirrors',
'clawfoot tub faucit kit': 'clawfoot tub faucet kit',
'protective pper': 'protective paper',
'touchless dishwashing kintchen dispenser': 'touchless dishwashing kitchen dispenser',
'air temperture contorl valve': 'air temperature control valve',
'melinger hand truck wheals': 'melinger hand truck wheels',
'watt premiere water filters': 'watt premier water filters',
'weed killer spray contaner': 'weed killer spray container',
'18in hardware coth': '18in hardware cloth',
'ac window supprt': 'ac window support',
'vegetable plannter': 'vegetable planter',
'soap punp': 'soap pump',
'wall paper murial glue': 'wallpaper mural glue',
'vertical binds hardware': 'vertical blinds hardware',
'rubbermaid verital sheds': 'rubbermaid vertical sheds',
'1/2 in. extension joint': '1/2 in. expansion joint',
'curtin rods': 'curtain rods',
'edge glued rounda': 'edge glued rounds',
'plywood edge taope': 'plywood edge tape',
'36\' copoktop': '36\' cooktop',
'curtains non black out': 'curtains not blackout',
'honolule center drain': 'honolulu center drain',
'toliet snake': 'toilet snake',
'black and deckerbattery pack': 'black and decker battery pack',
'beer and wine combination frigerator': 'beer and wine combination refrigerator',
'mess wire fencing': 'mesh wire fencing',
'ecosmart 90 led daylight br30': 'ecosmart 90w led daylight br30',
'miniture bulbs 2 pin': 'miniature bulbs 2 pin',
'dishwasher water connection vlave': 'dishwasher water connection valve',
'ant bait raps': 'ant bait traps',
'coragated aluimin special order': 'corrugated aluminum special order',
'carpot canopy 10x20': 'carport canopy 10x20',
'10 foot white ethjernet cable': '10 foot white ethernet cable',
'polished chrome cbinet pulls': 'polished chrome cabinet pulls',
'cooper tubing': 'copper tubing',
'dwarf pereniel plants': 'dwarf perennial plants',
'lampost motion detector': 'lamp post motion detector',
'3 gutter oulets': '3 gutter outlets',
'kohler shower ddoors for tubs in nickel': 'kohler shower doors for tubs in nickel',
'zep liquid air fresher': 'zep liquid air freshener',
'rewiring built in oven': 'wiring built in oven',
'10/4 SJ CABLE': '10/4 SJO CABLE',
'tempered glass wndow': 'tempered glass window',
'mataeials needed for paver patio': 'materials needed for paver patio',
'tankles water heater gas outdoor': 'tankless water heater gas outdoor',
'ypermethrin': 'cypermethrin',
'kwikset halifax door leaver': 'kwikset halifax door lever',
'ryobi coordless 18v starter kit': 'ryobi cordless 18v starter kit',
'habor gazeebo': 'harbor gazebo',
'electric barbeque grills': 'electric barbecue grills',
'rasin raised garden bed': 'resin raised garden bed',
'barbeque grills big and easy': 'barbecue grills big and easy',
'floor warming matt': 'floor warming mat',
'machette': 'machete',
'cool tube lgts': 'cool tube lights',
'universal faucet connect': 'universal faucet connector',
'daltile hexgon': 'daltile hexagon',
'hurracaine brackets': 'hurricane brackets',
'martha stewart curtiins': 'martha stewart curtains',
'byfold doors': 'bifold doors',
'2 tier adjustable cabinet orgainzer': '2 tier adjustable cabinet organizer',
'7w compact flourescent bulb': '7w compact fluorescent bulb',
'singel wall stove pipe': 'single wall stove pipe',
'wheeld trimmer': 'wheeled trimmer',
'boader rocks': 'border rocks',
'crown moldinf jig': 'crown molding jig',
'small refridgerators': 'small refrigerators',
'blind courner': 'blind corner',
'asphault gap repair': 'asphalt gap repair',
'no. 30 ridgid cutting wheel': 'no. 30 rigid cutting wheel',
'battery cable conector': 'battery cable connector',
'coranado baseboard pine': 'coronado baseboard pine',
'cerrowire 18 gauge': 'cerro wire 18 gauge',
'universal exstention cord': 'universal extension cord',
'wirlpool counterdepth side by side refrigrator': 'whirlpool counter depth side by side refrigerator',
'cedar bahr 502 stain': 'cedar behr 502 stain',
'small tracerse rods': 'small traverse rods',
'yelloe safet tape': 'yellow safety tape',
'elctric heating lamps': 'electric heating lamps',
't8 flourescent bulbs': 't8 fluorescent bulbs',
'u bents fluorescent': 'u bend fluorescent',
'pergo lamate flooring': 'pergo laminate flooring',
'sweenys mole and gopher repelant': 'sweeney\'s mole and gopher repellent',
'rg6 connecto': 'rg6 connector',
'ge electriv burners': 'ge electric burners',
'replacement part for koehler toilet kb3': 'replacement part for kohler toilet kb3',
'furiture paint, stain and varnish': 'furniture paint, stain and varnish',
'mission prarie camber top slab': 'mission prairie camber top slab',
'mirr edge': 'mirror edge',
'orbital sanding disck': 'orbital sanding disc',
'quickrete 50 lb mix': 'quikrete 50 lb mix',
'high efficiency dust baf rigid vac': 'high efficiency dust bag ridgid vac',
'liminate flooring cleaning': 'laminate flooring cleaning',
'gtxworks trimmer spools': 'gt worx trimmer spools',
'securty bar mounts': 'security bar mounts',
'fall supression kit': 'fall suppression kit',
'weatherproof boom box': 'waterproof boombox',
'geld wen 2500 96 x 36': 'jeld wen 2500 96 x 36',
'enfineered floors drifting sand': 'engineered floors drifting sand',
'well pump back presure valve': 'well pump back pressure valve',
'heavy duty shevlving': 'heavy duty shelving',
'mmodel': 'model',
'frigidare stainless refrig': 'frigidaire stainless refrig',
'rusteoulm spray paint': 'rustoleum spray paint',
't5 high output ligh': 't5 high output light',
'sandpap': 'sandpaper',
'cerowire 12 gauge': 'cerro wire 12 gauge',
'what rings for toitets': 'what rings for toilets',
'infrared theomomter': 'infrared thermometer',
'1x6 toungh groove': '1x6 tongue groove',
'v ceader board': 'v cedar board',
'sodpstone': 'soapstone',
'10 yeaer smoke detectors/carbon monoxide combo': '10 year smoke detectors/carbon monoxide combo',
'kkohler toilet seat': 'kohler toilet seat',
'pink toliet seat elongated': 'pink toilet seat elongated',
'flexiblr bit': 'flexible bit',
'coleman instasmart grill': 'coleman instastart grill',
'exide battery 75,car battrey': 'exide battery 75,car battery',
'black cherry stainer': 'black cherry stain',
'1x4 pre primed mfd trim': '1 x 4 pre primed mdf trim',
'mnt movr combo shovel': 'mnt move combo shovel',
'100 watt candlabra bulb': '100 watt candelabra bulb',
'samsung black stainles': 'samsung black stainless',
'dewalt jig saw blad': 'dewalt jig saw blade',
'alluminum downspout connector': 'aluminum downspout connector',
'alltyp of fences': 'all type of fences',
'clow hammer 16 0z': 'claw hammer 16 0z',
'tomatoe plants': 'tomato plants',
'white lacquer wall selves': 'white lacquer wall shelves',
'pressure guage': 'pressure gauge',
'slid pad': 'slide pad',
'female hose connectore': 'female hose connector',
'solor lamp outside': 'solar lamp outside',
'daltile urban camoflogue': 'daltile urban camouflage',
'deocorative screws for hanging pictures': 'decorative screws for hanging pictures',
'kitchen composie double sinks': 'kitchen composite double sinks',
'whitesilicone': 'white silicone',
'self contained recepticles': 'self contained receptacles',
'brass handel door': 'brass handle door',
'charley brown christmas trees': 'charlie brown christmas trees',
'carbon fiber vinel': 'carbon fiber vinyl',
'phillips fluorescent 40': 'philips fluorescent 40',
'36 inxh return air grill': '36 inch return air grill',
'garden pond pump impellor': 'garden pond pump impeller',
'vinal flooring 25 year warranty': 'vinyl flooring 25 year warranty',
'mulcing blades for troy built': 'mulching blades for troy bilt',
'5 1/4 deckboard': '5 1/4 deck board',
'plaste dip': 'plasti dip',
'cemnt pads for makita bo5030': 'cement pads for makita bo5030',
'ge beverage refriderator': 'ge beverage refrigerator',
'bathroom plummbing': 'bathroom plumbing',
'gas pire column': 'gas fire column',
'confrence': 'conference',
'clock cuitain rod wood': 'clock curtain rod wood',
'decrotive outdoor lighting': 'decorative outdoor lighting',
'ballast for single t12 fluorscent bulb': 'ballast for single t12 fluorescent bulb',
'workstar cordless and recharable work light': 'workstar cordless and rechargeable work light',
'light bulb 250 cfl': 'light bulb 250w cfl',
'rubber gromet': 'rubber grommet',
'spray metallicpaint': 'spray metallic paint',
'paint quart zise': 'paint quart size',
'blinds for portch': 'blinds for porch',
'sable browj 95': 'sable brown 95',
'1/2 conduet': '1/2 conduit',
'wooden curton rod brackets': 'wooden curtain rod brackets',
'corbels and shelfs': 'corbels and shelves',
'seimens typ qt breaker': 'siemens type qt breaker',
'steel builco': 'steel bilco',
'metal joinst': 'metal joist',
'externol patio doors': 'external patio doors',
'FENSE LIGHTING': 'FENCE LIGHTING',
'oil bronzed wine glass rack': 'oiled bronze wine glass rack',
'klien lether pouch': 'klein leather pouch',
'shark rocket filtes': 'shark rocket filters',
'4x7 ruggs': '4 x 7 rugs',
'24 elicreic stove': '24 electric stove',
'grill hasmilton': 'grill hamilton',
'air vents for plumping': 'air vents for plumbing',
'gazebo with shelfs': 'gazebo with shelves',
'expanding plastic sleeves for scews': 'expanding plastic sleeves for screws',
'oli rubbed bronze drain': 'oil rubbed bronze drain',
'clothsline rope': 'clothesline rope',
'stove gas replacement knops': 'stove gas replacement knobs',
'rechargale batteries for solar lights': 'rechargeable batteries for solar lights',
'standard artificial grasa synthetic lawn turf': 'standard artificial grass synthetic lawn turf',
'new deck for rtz 50': 'new deck for rzt 50',
'wire shelv liner': 'wire shelf liner',
'wood paint with primerin blue': 'wood paint with primer in blue',
'fabreeze': 'febreze',
'ceilng fan': 'ceiling fan',
'manuel for 425 - 1649': 'manual for 425 - 1649',
'14 in dimond circular saw blade': '14 in diamond circular saw blade',
'berhr solid 213 deep': 'behr solid 213 deep',
'driveway m arkers': 'driveway markers',
'commercil threshold': 'commercial threshold',
'multinozzle spray painting': 'multi nozzle spray painting',
'shower nitch': 'shower niche',
'1/2x1/2 quater round': '1/2 x 1/2 quarter round',
'Insulted work gloves': 'Insulated work gloves',
'5000 lumnes': '5000 lumens',
'magnets for gromets': 'magnets for grommets',
'toro springkler': 'toro sprinkler',
'motion sensoring black decorative lamp': 'motion sensing black decorative lamp',
'proclean concentrated drain cleaner': 'pro clean concentrated drain cleaner',
'feather river doorsth sidelights': 'feather river doors sidelights',
'ridgid powerwasher parts': 'ridgid power washer parts',
'skill pressure sander': 'skil pressure sander',
'outdoor vertical sheda': 'outdoor vertical sheds',
'brick web thin brick flats': 'brickweb thin brick flats',
'airguard undelayment': 'airguard underlayment',
'toyotaa': 'toyota',
'round rug for kitch': 'round rug for kitchen',
'round one piece tiolet': 'round one piece toilet',
'sppeed square': 'speed square',
'adirondak chair': 'adirondack chair',
'hickory hadwre touch of spring': 'hickory hardware touch of spring',
'garge door handle': 'garage door handle',
'whiteled tree': 'white led tree',
'airosol epoxy paint': 'aerosol epoxy paint',
'ice ring machine': 'ice rink machine',
'deep expresso walnut/new ellenton': 'deep espresso walnut/new ellenton',
'interior walls bieges brown': 'interior walls beige brown',
'pet disinfectent': 'pet disinfectant',
'altra furniture parsons credenza desk with drawer and bookc': 'altra furniture parsons credenza desk with drawer and books',
'gorilla gold cpvc gluetm': 'gorilla gold cpvc glue',
'aligator clips': 'alligator clips',
'irrigation pipe connectoer': 'irrigation pipe connector',
'citronella fire pot fue': 'citronella fire pot fuel',
'garden spreklers heads': 'garden sprinklers heads',
'light swith insulation': 'light switch insulation',
'dual lock 3m veclro': 'dual lock 3m velcro',
'water proof mc connecter': 'waterproof dc connector',
'snow blowerr scraper blade': 'snowblower scraper blade',
'vesel tub': 'vessel tub',
'carrrs': 'careers',
'odl 6\' x 6\'retractable screens': 'odl 6\' x 6 retractable screens',
'outdoord storage locker': 'outdoor storage locker',
'standing seam roof panals': 'standing seam roof panels',
'phillips 65w 2 pack': 'philips 65w 2 pack',
'2 squares double 5 vinly siding': '2 squares double 5 vinyl siding',
'fabric steam cleamer': 'fabric steam cleaner',
'scikkens stain': 'sikkens stain',
'polyethylne cap': 'polyethylene cap',
'decorative interor glass doors': 'decorative interior glass doors',
'vanity top for two vessell': 'vanity top for two vessel',
'giant bird of paridise': 'giant bird of paradise',
'almeda hickory': 'alameda hickory',
'cabinet ba rpulls in bronze': 'cabinet bar pulls in bronze',
'l screwa': 'l screws',
'johan deer 0 turns': 'john deere 0 turns',
'milwankee 7 pc set': 'milwaukee 7 pc set',
'faucet pl801l 18 guage': 'faucet pl801l 18 gauge',
'12 light bronze chandilier': '12 light bronze chandelier',
'flourecent light plastic covers': 'fluorescent light plastic covers',
'roof pannel foam': 'roof panel foam',
'under cabinet lighting ro-hs': 'under cabinet lighting rohs',
'round lshower kit': 'round shower kit',
'concreet enchors': 'concrete anchors',
'woodwen pallet': 'wooden pallet',
'shigles': 'shingles',
'comercial plank doors': 'commercial plank doors',
'stainless steel kithen faucet with soap dispenser': 'stainless steel kitchen faucet with soap dispenser',
'm4 50 srcew': 'm4 50 screw',
'splitbolt connector': 'split bolt connector',
'charming 18 roll': 'charmin 18 roll',
'table glass oatu': 'table glass oahu',
'kohlor flush for toilet tank 4421': 'kohler flush for toilet tank 4421',
'outdoor pendant lioghting': 'outdoor pendant lighting',
'24 inflex gas line': '24 in flex gas line',
'lawn mower rechargeable batterys': 'lawn mower rechargeable batteries',
'merola metalic tile': 'merola metallic tile',
'above ground pool vaccume': 'above ground pool vacuum',
'bosss water softner': 'boss water softener',
'moen one handel kitchen faucet repair parts': 'moen one handle kitchen faucet repair parts',
'sanding machinehardwood floors': 'sanding machine hardwood floors',
'super patch driverway sealler': 'super patch driveway sealer',
'sschlueter shower system': 'schluter shower system',
'offset flang': 'offset flange',
'aluminium tube rectangle': 'aluminium tube rectangular',
'legrad keystone cat5e jack': 'legrand keystone cat5e jack',
'yellow jacket extenison cord': 'yellow jacket extension cord',
'Habachi': 'Hibachi',
'mini pendant braket': 'mini pendant bracket',
'hose to presure washer': 'hose to pressure washer',
'gliddon speed wall': 'glidden speed wall',
'new age produucts': 'new age products',
'archor tub and shower faucet trim': 'archer tub and shower faucet trim',
'space saving stoage': 'space saving storage',
'vinyl flooring that clicks togther': 'vinyl flooring that clicks together',
'gladden smooth stone': 'glidden smooth stone',
'knape vogt baseket': 'knape vogt basket',
'ul liquidthight 25': 'ul liquidtight 25',
'white glossy furniture pain': 'white gloss furniture paint',
'square bannister': 'square banister',
'greenh wall paint': 'green wall paint',
'tile medalions for the floor or wall': 'tile medallions for the floor or wall',
'milwalke brewers garden flag': 'milwaukee brewers garden flag',
'versatiube': 'versatube',
'kenocen can nozzle': 'kenken can nozzle',
'mosaic esterior': 'mosaic exterior',
'winow wheels': 'window wheels',
'stud popers': 'stud poppers',
'trane 2.5 toon 13 seer heat pump': 'trane 2.5 ton 13 seer heat pump',
'ultra vue quick screeen': 'ultra vue quick screen',
'watterproof cleated boots': 'waterproof cleated boots',
'hdx pneumaitic paint': 'hdx pneumatic paint',
'biscue dishwashers': 'bisque dishwashers',
'sunbrella sipcovers': 'sunbrella slipcovers',
'miracle grow water absorbing crystals': 'miracle gro water absorbing crystals',
'disposal rim and stopperkohler': 'disposal rim and stopper kohler',
'long brakets': 'long brackets',
'freplace gloves': 'fireplace gloves',
'ridgid power drve pipe threadrs': 'ridgid power drive pipe threader',
'12x24 shefl': '12x24 shelf',
'1x6 prime molding': '1x6 primed molding',
'countertop soap dispensor': 'countertop soap dispenser',
'bushbutton for door bell': 'push button for doorbell',
'cauk saver': 'caulk saver',
'rubber stipper': 'rubber stopper',
'16 inch flourescent': '16 inch fluorescent',
'pendents amber': 'pendants amber',
'newtone broan round 751': 'nutone broan round 751',
'danze shower vlve': 'danze shower valve',
'wooden track drawer replacment': 'wooden track drawer replacement',
'single granit bathroom vanity': 'single granite bathroom vanity',
'oval steele tubs': 'oval steel tubs',
'liquid weed and feeed': 'liquid weed and feed',
'outodoor oatoman': 'outdoor ottoman',
'nutone vaccum wall plate': 'nutone vacuum wall plate',
'collor clamp': 'collar clamp',
'pure air ultra filtration syste,m': 'pure air ultra filtration system',
'llantana': 'lantana',
'white melimine cabinet': 'white melamine cabinet',
'2-handlet diverter repair kit': '2-handle diverter repair kit',
'mosiac lamps': 'mosaic lamps',
'exterior pipeinsulation': 'exterior pipe insulation',
'warm espresso bamboo quarteround': 'warm espresso bamboo quarter round',
'hardwood medialons': 'hardwood medallions',
'tub/hand shoer diverter with trim': 'tub/hand shower diverter with trim',
'locite 2 plus 1': 'loctite 2 plus 1',
'kwiksest door handle delta': 'kwikset door handle delta',
'frame nail hitschi': 'frame nailer hitachi',
'30 mirrow medicine cabinet': '30 mirrored medicine cabinet',
'pecane trees': 'pecan trees',
'lifeproof carpet sample lower trasure': 'lifeproof carpet sample lower treasure',
'umbrell hole ring': 'umbrella hole ring',
'melmane wood': 'melamine wood',
'melomine accessories': 'melamine accessories',
'windows single hang': 'windows single hung',
'portabe bar': 'portable bar',
'crystable table set lamps': 'crystal table set lamps',
'schlage handleset bermingham': 'schlage handleset birmingham',
'lp gas converion kit': 'lp gas conversion kit',
'quart exterior semi glass enamel': 'quart exterior semi gloss enamel',
'woodrx ultra natural': 'wood rx ultra natural',
'brushed barringnton': 'brushed barrington',
'leather lgue': 'leather glue',
'moen bronze low arch faucet': 'moen bronze low arc faucet',
'18 inch linen closit': '18 inch linen closet',
'bear paint green myth': 'behr paint green myth',
'solar light rechargable batteries': 'solar light rechargeable batteries',
'solar powered emergancy unit': 'solar powered emergency unit',
'kohler 3 handle shower reapair kit': 'kohler 3 handle shower repair kit',
'thermadore black cast kitchen sink': 'thermador black cast kitchen sink',
'dental shelf door': 'dentil shelf door',
'seed starting mixx': 'seed starting mix',
'rubberaid dust mop': 'rubbermaid dust mop',
'phillips bugle-head finethread sharp': 'phillips bugle-head fine thread sharp',
'black laminate shelfing': 'black laminate shelving',
'ice maker cylinoid ge': 'ice maker solenoid ge',
'home decorators mantle green': 'home decorators mantel green',
'perrenial white daisy like': 'perennial white daisy like',
'chamber-top halifax glass dooor': 'chamber-top halifax glass door',
'depp well socket set': 'deep well socket set',
'hanger racc vertical': 'hanger rack vertical',
'tool package with pilers,needlenose': 'tool package with pliers,needlenose',
'fome core board': 'foam core board',
'colaroo outdoor shades corded': 'coolaroo outdoor shades corded',
'decoator chain': 'decorator chain',
'rust oleum dark hunter green spray enamel paint': 'rustoleum dark hunter green spray enamel paint',
'lights and siloutte': 'lights and silhouette',
'real live orchred plants': 'real live orchid plants',
'2ftx3ft industrail rbber mat': '2ftx3ft industrial rubber mat',
'fernace vent shut off': 'furnace vent shut off',
'cedar wood balisters': 'cedar wood balusters',
'gliden premium semi gloss quart': 'glidden premium semi gloss quart',
'mosaic tile costal mist': 'mosaic tile coastal mist',
'toilet lever kphler brass': 'toilet lever kohler brass',
'front doors - poinye zinc': 'front doors - pointe zinc',
'matte bailey mohogany': 'matte bailey mahogany',
'wesleyand': 'wesleyan',
'plasic diffuser': 'plastic diffuser',
'cover kage for pet': 'cover cage for pet',
'network agapter': 'network adapter',
'whitehaus bathroom sinl': 'whitehaus bathroom sink',
'icey tech': 'icey tek',
'kaorik wine': 'kalorik wine',
'susbenders': 'suspenders',
'policarbonate case': 'polycarbonate case',
'shaw livng rugs model rac66': 'shaw living rugs model rac66',
'carpet in bassment': 'carpet in basement',
'bifold doorsfold plantation': 'bi fold doors fold plantation',
'handheld seed speader': 'handheld seed spreader',
'hot dipped galvinized coil nails': 'hot dipped galvanized coil nails',
'hand saw sharpner': 'hand saw sharpener',
'mattress foam protecter': 'mattress foam protector',
'n utdriver bit': 'nut driver bit',
'lattice wwod tone': 'lattice wood tone',
'our door receptacles': 'outdoor receptacles',
'great outdors': 'great outdoors',
'exterior string ligts': 'exterior string lights',
'dog ,cat,repellant': 'dog ,cat,repellent',
'20a wht nylon duple': '20a wht nylon duplex',
'fatmax leveler premier': 'fatmax level premier',
'ralph laren brown paints': 'ralph lauren brown paints',
'liquid bi fuels': 'liquid biofuels',
'scrubbin sponge': 'scrubbing sponge',
'ceramic tile tooth brush and soap holder': 'ceramic tile toothbrush and soap holder',
'cultured marbl;e shower walls': 'cultured marble shower walls',
'did recorder player': 'dvd recorder player',
'golith': 'goliath',
'black maytag french door refrigirator': 'black maytag french door refrigerator',
'stair nose santos maogani': 'stair nose santos mahogany',
'l tub fauctes': 'l tub faucets',
'eyebolt brass': 'eye bolt brass',
'terracotta exteriorpaint': 'terracotta exterior paint',
'manuel venting sky light': 'manual venting skylight',
'bathroom fan motion sencer': 'bathroom fan motion sensor',
'hard start capacitator': 'hard start capacitor',
'windows gazing bead': 'windows glazing bead',
'vanitiy top back splach': 'vanity top backsplash',
'large yellow screw inground anchors': 'large yellow screw in ground anchors',
'heavy duty polyurathane': 'heavy duty polyurethane',
'folfable stool': 'foldable stool',
'charlston south carolina': 'charleston south carolina',
'pine flooring, tang end grove': 'pine flooring, tongue and groove',
'starter fuil': 'starter fuel',
'granite colr group prices': 'granite color group prices',
'calanvreas': 'calaveras',
'golden krome spray': 'gold chrome spray',
'g e micewave': 'g e microwave',
'sheet meatal hole cutter': 'sheet metal hole cutter',
'zurn hot short stemcartridge': 'zurn hot short stem cartridge',
'outdoor picture ftames': 'outdoor picture frames',
'shower pad porceleain': 'shower pan porcelain',
'battery under counter lightening': 'battery under counter lighting',
'elictric door bail': 'electric door bell',
'barbeque insert': 'barbecue insert',
'barclay glass bathroom shelfs': 'barclay glass bathroom shelves',
'preserva wood caryon': 'preserva wood crayon',
'white grey floor tile mosiac': 'white grey floor tile mosaic',
'minwax wood puty': 'minwax wood putty',
'the govenore': 'the governor',
'diverter 5 in. tub spout with slip fit connection in chrom': 'diverter 5 in. tub spout with slip fit connection in chrome',
'vinyl plank blue slatr': 'vinyl plank blue slate',
'frameless shwoer panel': 'frameless shower panel',
'virtue usa huntshire': 'virtu usa huntshire',
'3.5 Hindge': '3.5 Hinge',
'round plastic tablrs': 'round plastic tables',
'paint storage contaiers': 'paint storage containers',
'centerset 2-handle weall': 'centerset 2-handle wall',
'wax ring with self taping bolts': 'wax ring with self tapping bolts',
'gama sonic winsor pier base': 'gama sonic windsor pier base',
'pilla windows': 'pella windows',
'dresser acessories': 'dresser accessories',
'duel compression 1/2 x 3/8 valve': 'dual compression 1/2 x 3/8 valve',
'american atanderd plebe 4086': 'american standard plebe 4086',
'dyson ball allergy vaccume': 'dyson ball allergy vacuum',
'low woltage relay': 'low voltage relay',
'hand steam cleanere': 'hand steam cleaner',
'eiectric concrte mixer': 'electric concrete mixer',
'pemco sill extender': 'pemko sill extender',
'silver branzing rods': 'silver brazing rods',
'sanding beltsr': 'sanding belts',
'dorr faceplates': 'door faceplates',
'stainless steel ball beating for hinges': 'stainless steel ball bearing for hinges',
'stabilty': 'stability',
'hose bibb replacement valve': 'hose bib replacement valve',
'long shower curtins': 'long shower curtains',
'crub rubber': 'crumb rubber',
'swivel saftey cover': 'swivel safety cover',
'makita oscilating saw': 'makita oscillating saw',
'whithaus faucet speckled brass': 'whitehaus faucet speckled brass',
'energy efficent skylight': 'energy efficient skylight',
'garden seed packs': 'garden seed packets',
'boshe double bevel sliding miter saw': 'bosch double bevel sliding miter saw',
'taylor test lit': 'taylor test kit',
'chargril grill': 'charbroil grill',
'over ran': 'overran',
'recipricating saw 15 amp': 'reciprocating saw 15 amp',
'mikita 18v 2.6 ah': 'makita 18v 2.6 ah',
'no burn spry': 'no burn spray',
'cuctis soil': 'cactus soil',
'brushed stainless cabin ate hardware': 'brushed stainless cabinet hardware',
'fork lift strps': 'forklift straps',
'electrian': 'electrician',
'doorbell chimes and transformr': 'doorbell chimes and transformer',
'faux diamondplate': 'faux diamond plate',
'milstead vintage maple engineered flooring': 'millstead vintage maple engineered flooring',
'ce tech coaxial cablenail in clips': 'ce tech coaxial cable nail in clips',
'bq heat distributipn plates': 'bbq heat distribution plates',
'metal lath stuko': 'metal lath stucco',
'cord less drill portcable': 'cordless drill porter cable',
'round bulb sting lights': 'round bulb string lights',
'lp coversion kit maytag dryer': 'lp conversion kit maytag dryer',
'chase lounger covers': 'chaise lounge covers',
'insl-x pure step': 'insl-x sure step',
'gerber knife tactiical': 'gerber knife tactical',
'deecals number': 'decals number',
'hampton bat 26\'. w tilt out hamper white': 'hampton bay 26\'. w tilt out hamper white',
'outdoor chritstmas light remote': 'outdoor christmas light remote',
'wood fuelpellets': 'wood fuel pellets',
'cpipe lamp': 'pipe lamp',
'wiemans stainless cleaner': 'weimans stainless cleaner',
'10 roll up outside blinds': '10 roll up outdoor blinds',
'wainscote': 'wainscot',
'heat resistant spicket': 'heat resistant spigot',
'garage shelve': 'garage shelf',
'shevles': 'shelves',
'storage shelfs': 'storage shelves',
'proipane': 'propane',
'ventless gas heters': 'ventless gas heaters',
'vinal fencing': 'vinyl fencing',
'toliet bowl': 'toilet bowl',
'toliet bowl wrench': 'toilet bowl wrench',
'fanc wire': 'fancy wire',
't post fence assesories': 't post fence accessories',
'telescooping ladder': 'telescoping ladder',
'spring haven brown all weather wicked': 'spring haven brown all weather wicker',
'36 exterior steele door': '36 exterior steel door',
'faucetskitchen': 'faucets kitchen',
'batt insulatiom': 'batt insulation',
'congolium': 'congoleum',
'vinal flooring': 'vinyl flooring',
'vynil floorring': 'vinyl flooring',
'clacier bay toliet': 'glacier bay toilet',
'GLAZER BAY TOILET': 'GLACIER BAY TOILET',
'norton hot water heater ingniter': 'norton hot water heater igniter',
'undercounter lighs': 'under counter lights',
'stainless refridgerator': 'stainless refrigerator',
'stainless steel refridgerator': 'stainless steel refrigerator',
'window ac manuel operation': 'window ac manual operation',
'rustolem': 'rustoleum',
'18v drill brushles': '18v drill brushless',
'dining sets outdo?': 'dining sets outdoor?',
'eat resistant epoxy': 'heat resistant epoxy',
'cordless drils': 'cordless drills',
'3 piece bfush set': '3 piece brush set',
'kitchen faucet installtion tools': 'kitchen faucet installation tools',
'Moen Kitchen sink fauset': 'Moen Kitchen sink faucet',
'plaqstic bucket': 'plastic bucket',
'3m winow film': '3m window film',
'water softner': 'water softener',
'flourescent light bulp': 'fluorescent light bulb',
'closermaid cabinet': 'closetmaid cabinet',
'raised panel extirior doors': 'raised panel exterior doors',
'blcktop repair kit': 'blacktop repair kit',
'peal and stick flashning': 'peel and stick flashing',
'marshaltown 6 inch': 'marshalltown 6 inch',
'vynel wall tiles': 'vinyl wall tiles',
'presusre treated post': 'pressure treated post',
'LAWN LEAF VACUM': 'LAWN LEAF VACUUM',
'space heatres': 'space heaters',
'alumium fence 6 ft 6ft': 'aluminum fence 6 ft 6 ft',
'bathroom sinks kholer': 'bathroom sinks kohler',
'pedistal': 'pedestal',
'clear eppoxy': 'clear epoxy',
'wood fir plank flooring': 'wood for plank flooring',
'quickcrete waterproof cement': 'quikrete waterproof cement',
'rood rake': 'roof rake',
'propane gas tank meater': 'propane gas tank meter',
'ac cooling fin straightenrs': 'ac cooling fin straightener',
'slidng panel lock': 'sliding panel lock',
'closet maiid cabinets': 'closet maid cabinets',
'ridge power tools combo packs': 'ridgid power tools combo packs',
'backsplash tiiles': 'backsplash tiles',
'cabinet knobsd': 'cabinet knobs',
'cabnet knobs': 'cabinet knobs',
'dealt air compressor parts': 'dewalt air compressor parts',
'spgot': 'spigot',
'paver bricks scolloped': 'paver bricks scalloped',
'CHASE LOUNGE': 'CHAISE LOUNGE',
'layndry tu': 'laundry tu',
'submeribale pedistal sump pump': 'submersible pedestal sump pump',
'celling fans': 'ceiling fans',
'wall sconse': 'wall sconce',
'93 inch widecellular shades': '93 inch wide cellular shades',
'post white ligth': 'post white light',
'palmero brushed nickel ceiling fan': 'palermo brushed nickel ceiling fan',
'aromaticeatern red cedar planking': 'aromatic eastern red cedar planking',
'black and decker hobby crafter': 'black and decker hobbycrafter',
'front load fridaire': 'front load frigidaire',
'pedestial washer': 'pedestal washer',
'whilrpool front loader washer': 'whirlpool front loader washer',
'extrior louvored wood door 30x80': 'exterior louvered wood door 30x80',
'interior doorser': 'interior doors',
'dill battery 12v model g0805': 'drill battery 12v model g0805',
'10 stair lader': '10 stair ladder',
'milwakee 1/2 impact cordless': 'milwaukee 1/2 impact cordless',
'kolher': 'kohler',
'floor slealer': 'floor sealer',
'high traffic floor polurethane paint': 'high traffic floor polyurethane paint',
'sawzall blades miluakee': 'sawzall blades milwaukee',
'vaccum hose': 'vacuum hose',
'vynal repalcement windows': 'vinyl replacement windows',
'vinil for flors': 'vinyl for floors',
'led withe': 'led white',
'squar flushmount lights': 'square flush mount lights',
'huskey 18': 'husky 18',
'remove oder from kerosine': 'remove odor from kerosene',
'25ft huskt tape': '25 ft husky tape',
'plastic corrougeted roofing': 'plastic corrugated roofing',
'kholerhighland white toilet': 'kohler highline white toilet',
'toilet seat for briggs toliet': 'toilet seat for briggs toilet',
'steel shelve': 'steel shelf',
'dig irritation drip': 'dig irrigation drip',
'kohler pedastal sink': 'kohler pedestal sink',
'high loss natural jabota': 'high gloss natural jatoba',
'Huskavarna': 'Husqvarna',
'power cordclass 2 power model xy_2900600_u': 'power cord class 2 power model xy_2900600_u',
'treaated plywood': 'treated plywood',
'air condtioning wall unit': 'air conditioning wall unit',
'wall air conditioneer': 'wall air conditioner',
'window ac insaller': 'window ac installer',
'sensor porch ligts': 'sensor porch lights',
'miricile applet or and tray': 'miracle applet or and tray',
'paint refil tray': 'paint refill tray',
'door knobs exteria': 'door knobs exterior',
'exhaustless portable airconditioner': 'exhaustless portable air conditioner',
'portable aircondition': 'portable air conditioner',
'oscilliating too': 'oscillating tool',
'PYWOOD': 'PLYWOOD',
'rigid nailer': 'ridgid nailer',
'bankoft toilet biscuit': 'bancroft toilet biscuit',
'mown pull down faucet': 'moen pull down faucet',
'lo gas water heater': 'low gas water heater',
'richman water heater': 'richmond water heater',
'tall toliet': 'tall toilet',
'ridding mower covers': 'riding mower covers',
'hole angel jig': 'hole angle jig',
'10 deep kitchen sink porcelin': '10 deep kitchen sink porcelain',
'plastic tiles pcv': 'plastic tiles pvc',
'vinyl sheeti': 'vinyl sheet',
'samsungelectric ranges': 'samsung electric ranges',
'frameless shoer doors': 'frameless shower doors',
'webber charcoal grill': 'weber charcoal grill',
'kerosine heaters': 'kerosene heaters',
'kersone heaters': 'kerosene heaters',
'propain heater': 'propane heater',
'heating elements for dyer whirlpool': 'heating elements for dryer whirlpool',
'safty glasses': 'safety glasses',
'eletric stove': 'electric stove',
'Schecule 40 Pipe': 'Schedule 40 Pipe',
'bayonett saw blades': 'bayonet saw blades',
'sconses': 'sconces',
'52\' pinacle ceiling fan': '52\' pinnacle ceiling fan',
'atic fans with lubers': 'attic fans with louvers',
'cealing fans': 'ceiling fans',
'hampton bay out door celing fan': 'hampton bay outdoor ceiling fan',
'out door celing fan': 'outdoor ceiling fan',
'kitchen exaust fan': 'kitchen exhaust fan',
'Cimmaron': 'Cimarron',
'fridgedaire': 'frigidaire',
'frigidaire washer door striker/catch': 'frigidaire washer door striker/latch',
'lawn mover wrench': 'lawn mower wrench',
'bmboo lattecie': 'bamboo lattice',
'1 handle tub and shower faucet shower and tub vlaves': '1 handle tub and shower faucet shower and tub valves',
'hansgroph faucets bathroom': 'hansgrohe faucets bathroom',
'led light bulbsbulbs': 'led light bulbs bulbs',
'landscape srone': 'landscape stone',
'braid nailer combo kit': 'brad nailer combo kit',
'doors for mobilhomes': 'doors for mobile homes',
'smaller closet lights': 'small closet lights',
'traficmaster': 'trafficmaster',
'hardi board smooth': 'hardie board smooth',
'wainscoating': 'wainscoting',
'galvanisedround fire pit ring': 'galvanized round fire pit ring',
'electrichot water heaters residential': 'electric hot water heaters residential',
'garage shelf unjit': 'garage shelf unit',
'stone baxksplash': 'stone backsplash',
'pendent cealing fixture': 'pendant ceiling fixture',
'undercabinet ligghts': 'under cabinet lights',
'martha stewartcabinet pull': 'martha stewart cabinet pull',
'4 fluorescant fixture covers': '4 fluorescent fixture covers',
'exterior vanyl french door': 'exterior vinyl french door',
'adheasive': 'adhesive',
'lineulium floor': 'linoleum floor',
'plexiglass selves': 'plexiglass shelves',
'Allure mellowood flooring': 'Allure mellow wood flooring',
'allure tile sedon?': 'allure tile sedona?',
'allure vinyl tilecordoba': 'allure vinyl tile cordoba',
'wood veener facing for kitchen cabinets': 'wood veneer facing for kitchen cabinets',
'painters plastice': 'painters plastic',
'granitne sealer': 'granite sealer',
'55 inch cultured marble vanity tope': '55 inch cultured marble vanity top',
'mirros': 'mirrors',
'garge floor paint': 'garage floor paint',
'weather indoor and outpoor temp': 'weather indoor and outdoor temp',
'ryobi blower with batery': 'ryobi blower with battery',
'powerwasher hose': 'power washer hose',
'mikita 9.5 volt drill': 'makita 9.5 volt drill',
'vinal fence straps': 'vinyl fence straps',
'black chandelier wjth black shades': 'black chandelier with black shades',
'medecine cabinet': 'medicine cabinet',
'medicient cabinet': 'medicine cabinet',
'serface mount medicine cabinets': 'surface mount medicine cabinets',
'husqvarna presure washer': 'husqvarna pressure washer',
'back yard weather forecasteer': 'backyard weather forecaster',
'chain link fenceing': 'chain link fencing',
'jogsaw tool': 'jigsaw tool',
'lg ruff wall instalation': 'lg ruff wall installation',
'pcv pipe sement': 'pvc pipe cement',
'hardi trim': 'hardietrim',
'vynal siding insol': 'vinyl siding insol',
'cheapete gas 40 gallon hot water heater': 'cheapest gas 40 gallon hot water heater',
'powervent water heater': 'power vent water heater',
'exterieur door 32 inch': 'exterior door 32 inch',
'vynal floor matting': 'vinyl floor matting',
'door knobsw': 'door knobs',
'black decke weed eaters': 'black decker weed eaters',
'lectric string trimmer cst1200r': 'electric string trimmer cst1200r',
'1.4 mircowave over the stove': '1.4 microwave over the stove',
'stove excaust fan': 'stove exhaust fan',
'mobile home extior doors': 'mobile home exterior doors',
'wood lathesw': 'wood lathes',
'anderson replacement double hung window 34.5x36.5': 'andersen replacement double hung window 34.5x 36.5',
'contrcator baseboard': 'contractor baseboard',
'moehn kitchen facet 87211srssd': 'moen kitchen faucet 87211srs',
'repare kit for 2-handle side sprayer kitchen faucet': 'repair kit for 2-handle side sprayer kitchen faucet',
'ecco friendly garden hose': 'eco friendly garden hose',
'flex gardn hose': 'flex garden hose',
'garden host 50': 'garden hose 50',
'bathroon lighting': 'bathroom lighting',
'lanscape timber': 'landscape timber',
'bathroom valnity lights': 'bathroom vanity lights',
'gas pressure regular': 'gas pressure regulator',
'ashely 48 in electric chi': 'ashley 48 in electric chi',
'2x6 treted 8ft long': '2x6 treated 8ft long',
'wheel borrow': 'wheelbarrow',
'whellbarrow': 'wheelbarrow',
'scement bags': 'cement bags',
'accordian door': 'accordion door',
'Electic Lawn Mowers': 'Electric Lawn Mowers',
'hampton bay cabinetscornor cabinetupper': 'hampton bay cabinets corner cabinet upper',
'electric pump for sprying': 'electric pump for spraying',
'front foor 2 siding': 'front door 2 siding',
'whirlpool lgas dryer': 'whirlpool gas dryer',
'pressure treated lumber spaint': 'pressure treated lumber paint',
'rhee. 40 gallon water heaters': 'rheem. 40 gallon water heaters',
'8x96 white decrotive shelf': '8x96 white decorative shelf',
'bathroom pendastal': 'bathroom pedestal',
'r25/r30 faced insullation': 'r25/r30 faced insulation',
'heavy dutty letter support': 'heavy duty letter support',
'ceder decking': 'cedar decking',
'negitave air machine': 'negative air machine',
'outdoor maouse traps': 'outdoor mouse traps',
'storeage shed': 'storage shed',
'car canoply': 'car canopy',
'commerical tile': 'commercial tile',
'1 1/2 colated rock screws': '1 1/2 collated rock screws',
'sheeet rock mud': 'sheetrock mud',
'counterdepth fridge': 'counter depth fridge',
'maytag refregirator': 'maytag refrigerator',
'whirlpool french door frig 30 wide': 'whirlpool french door fridge 30 wide',
'wirlpool 30 wide french door': 'whirlpool 30 wide french door',
'dleta shower faucet handles': 'delta shower faucet handles',
'38 grainte composit sink': '38 granite composite sink',
'blown in insulaation': 'blown in insulation',
'foam insulatino': 'foam insulation',
'doors interiorwith door jams': 'doors interior with door jams',
'residentialsteel door and frame': 'residential steel door and frame',
'wood swimg set kits': 'wood swing set kits',
'quickcrete resurfacer': 'quikrete resurfacer',
'2 inch srew cap': '2 inch screw cap',
'30 gar builtin ranges': '30 gas built in ranges',
'samsong stive': 'samsung stove',
'chissel': 'chisel',
'rigid compound miter saw': 'ridgid compound miter saw',
'rigid compound miter saw dust pouch': 'ridgid compound miter saw dust pouch',
'shampoo and lotion automatice dispenser': 'shampoo and lotion automatic dispenser',
'wall scone': 'wall sconce',
'rubber for refridgerators': 'rubber for refrigerators',
'water proofing shower membrame': 'waterproofing shower membrane',
'fridigdaire back gas range': 'frigidaire black gas range',
'cabrio dryder': 'cabrio dryer',
'whilrpool cabrio dryer': 'whirlpool cabrio dryer',
'light switcht sensor': 'light switch sensor',
'calutta marble laminate countertop': 'calcutta marble laminate countertop',
'vinylcorner boards 4 inch': 'vinyl corner boards 4 inch',
'plastix box': 'plastic box',
'scurity screen doors': 'security screen doors',
'nonadhesive vinyl flooring': 'non adhesive vinyl flooring',
'trafficmaster interloclk': 'trafficmaster interlock',
'anntenias': 'antennas',
'clothes dryer srand': 'clothes dryer stand',
'eletric water heater': 'electric water heater',
'sharkbike push to connect 3/4': 'sharkbite push to connect 3/4',
'fuel nozzle furnance': 'fuel nozzle furnace',
'ryobi one batery': 'ryobi one battery',
'5/8 floring plywood weatherproof': '5/8 flooring plywood weatherproof',
'mitter saw manual': 'miter saw manual',
'selenoid for dryer': 'solenoid for dryer',
'presure coated wood': 'pressure coated wood',
'composote lumber': 'composite lumber',
'14 awgsoilid wire': '14 awg solid wire',
'welded wire fenching 12 gauge': 'welded wire fencing 12 gauge',
'patio chair cusions': 'patio chair cushions',
'viynl patches': 'vinyl patches',
'7 in. stove pie': '7 in. stove pipe',
'whirlpoolgas stove': 'whirlpool gas stove',
'whirpool microwave 1.4 cu ft': 'whirlpool microwave 1.4 cu ft',
'whirpool refrigerator': 'whirlpool refrigerator',
'3\' nailes': '3\' nails',
'nailer tooal': 'nailer tool',
'weed barier': 'weed barrier',
'oped garage door indicator': 'open garage door indicator',
'styrafoam': 'styrofoam',
'10 foot step laddert': '10 foot step ladder',
'3 1/2 hardwar': '3 1/2 hardware',
'double control shower vavle': 'double control shower valve',
'replacement shower encosure rod': 'replacement shower enclosure rod',
'baby gurad gate': 'baby guard gate',
'joint compund light weight': 'joint compound lightweight',
'sheetrock high preformance joint compound': 'sheetrock high performance joint compound',
'1x2 appearnce boards': '1x2 appearance boards',
'lumber 2x8 composit': 'lumber 2x8 composite',
'floot ball': 'float ball',
'dewalt empact driver': 'dewalt impact driver',
'bosh cordless combo set': 'bosch cordless combo set',
'ryobi 18v battwery': 'ryobi 18v battery',
'kihchen cabinet slidr shelves': 'kitchen cabinet slide shelves',
'chesnut border edging': 'chestnut border edging',
'outdoor seat cushions 24.5 whte': 'outdoor seat cushions 24.5 white',
'12x12 tile msaic': '12x12 tile mosaic',
'skill screwdriver battery': 'skil screwdriver battery',
'manual for airens lawnmower': 'manual for ariens lawn mower',
'gas stabilisor': 'gas stabilizer',
'4 x 4 white pocelain tile': '4 x 4 white porcelain tile',
'rigid pipe cutter': 'ridgid pipe cutter',
'24 regrigerators': '24 refrigerators',
'refrigerato 33 inch wide': 'refrigerator 33 inch wide',
'smudge proof stainless steele': 'smudge proof stainless steel',
'whirpool amana': 'whirlpool amana',
'moen banbury 24 in. doubletowel bar': 'moen banbury 24 in. double towel bar',
'4\' r;ubber top set base': '4\' rubber top set base',
'extension springes': 'extension springs',
'grass string trimmer electric homelight': 'grass string trimmer electric homelite',
'craftman style lights': 'craftsman style lights',
'glacier bay delmare expresso wall mirror': 'glacier bay del mar espresso wall mirror',
'dollie 600 lbs': 'dolly 600 lbs',
'patio tille': 'patio tile',
'eucalptus white board': 'eucalyptus white board',
'vynal tile': 'vinyl tile',
'heat reducing window flim': 'heat reducing window film',
'Porach Light': 'Porch Light',
'brissell zing vacuum bags': 'bissell zing vacuum bags',
'toillet': 'toilet',
'kitchen aid refrigirator light bulb:': 'kitchenaid refrigerator light bulb:',
'chadelier': 'chandelier',
'cararra marble': 'carrara marble',
'coedless makita chainsaw with batteries': 'cordless makita chainsaw with batteries',
'mikita cordless drill': 'makita cordless drill',
'antique brass hindges for doors': 'antique brass hinges for doors',
'riobi battery': 'ryobi battery',
'feerzer': 'freezer',
'schlade wirell door lock': 'schlage wireless door lock',
'water proff board': 'waterproof board',
'celing light holder': 'ceiling light holder',
'wood toold': 'wood tools',
'4 inch insolation': '4 inch insulation',
'Urehtane Foam Sheet': 'Urethane Foam Sheet',
'4 center lavatory facuet': '4 center lavatory faucet',
'Shower facuet': 'Shower faucet',
'electric dyrer heater elemnet': 'electric dryer heater element',
'milluakee drill bits': 'milwaukee drill bits',
'scrren wire': 'screen wire',
'safegaurd 30 synthetic felt': 'safeguard 30 synthetic felt',
'hampden bay chandelier': 'hampton bay chandelier',
'1/2 inch pnumatic stapler': '1/2 inch pneumatic stapler',
'12\' firetreat 2x4': '12\' fire treated 2x4',
'american-standarfairfield elongated one-piece 1.6 gpf toilet': 'american-standard fairfield elongated one-piece 1.6 gpf toilet',
'toilet aquaia': 'toilet aquia',
'Comercial electric': 'Commercial electric',
'light puff defuser': 'light puff diffuser',
'ryobi drill prass': 'ryobi drill press',
'110v ectric dryers': '110v electric dryers',
'FIRE RESTISTANT BOARD': 'FIRE RESISTANT BOARD',
'vinyle plankj': 'vinyl plank',
'cordless backpack vaccume': 'cordless backpack vacuum',
'hampton baysolar bird lights': 'hampton bay solar bird lights',
'kohler chair height elongated toliet': 'kohler chair height elongated toilet',
'electic fireplace': 'electric fireplace',
'hampton bay jmestown': 'hampton bay jamestown',
'surfacemount kitchen sink': 'surface mount kitchen sink',
'rigid wet nozzelsqueegee': 'ridgid wet nozzle squeegee',
'vacumns': 'vacuums',
'gble vent': 'gable vent',
'ventalation': 'ventilation',
'biinds and shades': 'blinds and shades',
'copact drills cordless': 'compact drills cordless',
'ridge 18v hammer': 'ridgid 18v hammer',
'heavy dutty garden hose': 'heavy duty garden hose',
'1/2\' extirior plywood': '1/2\' exterior plywood',
'gutter water reflector': 'gutter water deflector',
'under cabinet led light accesory pack': 'under cabinet led light accessory pack',
'armstroung floor adhesive': 'armstrong floor adhesive',
'whirlpoolstainless steel refrig': 'whirlpool stainless steel refrig',
'black and decker elctric': 'black and decker electric',
'cordless edgere': 'cordless edger',
'white electrtical outlets': 'white electrical outlets',
'tan unmbrella': 'tan umbrella',
'gothic fence picketts': 'gothic fence pickets',
'vinyl 1 bilnd': 'vinyl 1 blinds',
'console tab;le': 'console table',
'T-5 florescent light fixtures': 'T-5 fluorescent light fixtures',
'royobi pedestal grinder wheel': 'ryobi pedestal grinder wheel',
'wall panaling': 'wall paneling',
'PORCH STAIR RAILLING': 'PORCH STAIR RAILING',
'micro fibe': 'microfiber',
'champion toliet part': 'champion toilet parts',
'rr vaccum filter': 'rr vacuum filter',
'exhust fan': 'exhaust fan',
'corragated metal': 'corrugated metal',
'gasolene generaters and inverters': 'gasoline generators and inverters',
'stailess steel top stoves': 'stainless steel top stoves',
'top freezer refrigeratot': 'top freezer refrigerator',
'3/4 inche rock': '3/4 inch rock',
'12 roofing pannel': '12 roofing panel',
'blakck in decker edger': 'black and decker edger',
'tile scrapper': 'tile scraper',
'brick morter': 'brick mortar',
'cement blodks': 'cement blocks',
'unmortified mortor': 'unmodified mortar',
'bifold door hardw': 'bifold door hardware',
'metal scerews': 'metal screws',
'sliding doos for backyard': 'sliding doors for backyard',
'screen fame corner': 'screen frame corner',
'electric lawn mowerectrical': 'electric lawn mower electrical',
'clacer bay all n one sink': 'glacier bay all in one sink',
'sola water fountain': 'solar water fountain',
'closet clothes rackclosetmaid': 'closet clothes rack closetmaid',
'passload': 'paslode',
'kitchen tile backspl': 'kitchen tile backsplash',
'viyle fencing': 'vinyl fencing',
'flexible tourche extension': 'flexible torch extension',
'6 pnl molded': '6 panel molded',
'soild core flush pre hung door': 'solid core flush prehung door',
'convction heater': 'convection heater',
'closet orginizer shoe rack wire': 'closet organizer shoe rack wire',
'freesstanding': 'free standing',
'mmirror closet doors': 'mirror closet doors',
'maratha stewart monogram wreath': 'martha stewart monogram wreath',
'edsel heavy duty 5': 'edsal heavy duty 5',
'11 ft extension cord groud': '11 ft extension cord ground',
'indoor/otdoor extensions cords e176194': 'indoor/outdoor extension cords e176194',
'outdoor extention cords e': 'outdoor extension cords e',
'unface insulation 23 inches wide': 'unfaced insulation 23 inches wide',
'porble toilets': 'portable toilets',
'toilet saftey seat': 'toilet safety seat',
'silca sand': 'silica sand',
'tall 18 in storage cabnet': 'tall 18 in storage cabinet',
'20x8 storge shed': '20 x 8 storage shed',
'rubbermade shed': 'rubbermaid shed',
'rubbermaid resin storage cabnetsn': 'rubbermaid resin storage cabinets',
'cedar wod chips': 'cedar wood chips',
'hidraulic tools': 'hydraulic tools',
'celing fans with lighting and remote': 'ceiling fans with lighting and remote',
'fridigidaire drop in oven': 'frigidaire drop in oven',
'tub surround pices': 'tub surround prices',
'allure flooring oak expresso': 'allure flooring oak espresso',
'pass and seymore light cover switch': 'pass and seymour light cover switch',
'28x54 replacment window': '28x54 replacement windows',
'anderson windows new constraction': 'anderson windows new construction',
'swamp oolers': 'swamp coolers',
'wahing machines': 'washing machines',
'interior primed mdf crown mouldin': 'interior primed mdf crown moulding',
'built in convectionoven': 'built in convection oven',
'flpwers for your garden': 'flowers for your garden',
'closetr rod': 'closet rod',
'unfinished wide bplanked hickory flooring': 'unfinished wide plank hickory flooring',
'48v to 110 invertor': '48v to 110v inverter',
'landscape wateting': 'landscape watering',
'sockets for fluorescence fixtres': 'sockets for fluorescent fixtures',
'woodceramic floor tile': 'wood ceramic floor tile',
'brigsg and stations 500 seris': 'briggs and stratton 500 series',
'green carpert': 'green carpet',
'pressure treated step tread 6ft': 'pressure treated stair tread 6ft',
'hand pump gfor water': 'hand pump for water',
'rutic lighting': 'rustic lighting',
'cender blocks': 'cinder blocks',
'talsrar': 'talstar',
'rybi power tools': 'ryobi power tools',
'portercable 6 gal': 'porter cable 6 gal',
'table covers waterproff': 'table covers waterproof',
'solid alium square tubing': 'solid aluminum square tubing',
'deck post jhardware': 'deck post hardware',
'hunter new bronzel fans': 'hunter new bronze fans',
'16d framin': '16d framing',
'moen brushed nickel batharoom': 'moen brushed nickel bathroom',
'barriar plastic': 'barrier plastic',
'window ac/hehat units': 'window ac/heat units',
'icycle lights': 'icicle lights',
'4 gallon expanion': '4 gallon expansion',
'floor mount lawndry seek': 'floor mount laundry sink',
'high addhesion primer': 'high adhesion primer',
'24 gauge wire connectorsa': '24 gauge wire connectors',
'sterio wire for indoor speakers': 'stereo wire for indoor speakers',
'garage bicyclestorage': 'garage bicycle storage',
'how mustall tankless water heater': 'how install tankless water heater',
'chelsea white acrylic oval in rectangl': 'chelsea white acrylic oval in rectangle',
'cleaning jeta for whirlpool': 'cleaning jets for whirlpool',
'bathroom faucet replacment valve': 'bathroom faucet replacement valve',
'3x5 cemet board': '3x5 cement board',
'vaccumm': 'vacuum',
'ghroe shower headstrong shower heads': 'grohe shower headstrong shower heads',
'mial boxes': 'mail boxes',
'claw tups': 'claw tips',
'facia corner brace': 'fascia corner brace',
'pegisas sink top': 'pegasus sink top',
'mirroes for doors': 'mirrors for doors',
'counter depth refridgidere': 'counter depth refrigerator',
'corrigaed fiberglass roofing': 'corrugated fiberglass roofing',
'window airconditionerwith heaters': 'window air conditioners with heaters',
'extention rail for opener': 'extension rail for opener',
'whitecomposite fascia board': 'white composite fascia board',
'vanity topp 31 white': 'vanity top 31 white',
'underhood range fan': 'under hood range fan',
'price pfister trevisa': 'price pfister treviso',
'milwaukee cordlees tools': 'milwaukee cordless tools',
'pendent light': 'pendant light',
'pre-emergent weed contro': 'pre-emergent weed control',
'is this item in stoes?': 'is this item in store?',
'door home secutity': 'door home security',
'3oo watt haalogen bulbs': '300 watt halogen bulbs',
'96 in flourescent bulbs': '96 in fluorescent bulbs',
'shop ceiling fane': 'shop ceiling fan',
'aaa batteries everready gold': 'aaa batteries eveready gold',
'buth tub faucet': 'bathtub faucet',
'delta montecello tub faucet': 'delta monticello tub faucet',
'ge spring water heater': 'geospring water heater',
'ge water heater egnighter': 'ge water heater igniter',
'31x19 one piecs bathroom sink': '31x19 one piece bathroom sink',
'replacment clips for wire rack': 'replacement clips for wire rack',
'ac air diverer': 'ac air diverter',
'3 sewer pipce': '3 sewer pipe',
'3\' electical pipe': '3\' electrical pipe',
'large outside horizontal storage shed': 'large outdoor horizontal storage shed',
'swing hangar hardware': 'swing hanger hardware',
'dim able balafon flood light': 'dimmable balafon flood light',
'phillips exterior led': 'philips exterior led',
'banity 11 watt light bulb': 'vanity 11 watt light bulb',
'kithchen install': 'kitchen install',
'magnet stainless steel for diswasher': 'magnet stainless steel for dishwasher',
'phone spliter': 'phone splitter',
'receptical': 'receptacle',
'water resistent electrical outlets': 'water resistant electrical outlets',
'kitchenaid superb oven': 'kitchenaid superba oven',
'403esprit 2x4 ceing tile': '403 esprit 2x4 ceiling tile',
'wall excess panel': 'wall access panel',
'drop celing tiles': 'drop ceiling tiles',
'pvc drop in celing tiles': 'pvc drop in ceiling tiles',
'pl gas hose': 'lp gas hose',
'12 v landscaping ligtening fixture': '12v landscape lighting fixture',
'behr white external semigloss paint': 'behr white exterior semi gloss paint',
'GRAGE DOOR OPENER': 'GARAGE DOOR OPENER',
'grage doors': 'garage doors',
'24 inch med oak base': '24 inch medium oak base',
'okeefes working hands': 'o\'keeffe\'s working hands',
'phenofin': 'penofin',
'8 foot galvinezed': '8 foot galvanized',
'12 mobil home air duct': '12 mobile home air duct',
'door hinges for americana refrigator': 'door hinges for americana refrigerator',
'tub drain kit bronz': 'tub drain kit bronze',
'halligon light bulb': 'halogen light bulb',
'husky rachet': 'husky ratchet',
'andersen vnyl windows': 'andersen vinyl windows',
'balwind double cilynder lock': 'baldwin double cylinder lock',
'drop down ceiling ppanel': 'drop down ceiling panel',
'arearugs and mats': 'area rugs and mats',
'dark expresso paint for wood': 'dark espresso paint for wood',
'melamine shelvees': 'melamine shelves',
'mosaic whitel and black tile': 'mosaic white and black tile',
'8 wre wheel': '8 wire wheel',
'9\' plna replament blade': '9\' plane replacement blade',
'saw zall blades': 'sawzall blades',
'pain pot': 'paint pot',
'drain cleaneraner machines': 'drain cleaner machines',
'anderson storm doors pet': 'andersen storm doors pet',
'basement window replacement insructions': 'basement window replacement instructions',
'grill cover brinkman double grill': 'grill cover brinkmann double grill',
'gerber daisies': 'gerbera daisies',
'gerber daisy': 'gerbera daisy',
'exterior wood stainolid color': 'exterior wood stain color',
'2700 br30 led': '2700k br30 led',
'3m wheather stripping': '3m weather stripping',
'barn doorhinges': 'barn door hinges',
'plywood progect': 'plywood project',
'28 guage screen': '28 gauge screen',
'lampsade pendent light': 'lamp shade pendant light',
'kitchen cabiner corner': 'kitchen cabinet corner',
'paatio swings': 'patio swings',
'12 bar chian for echo': '12 bar chain for echo',
'bix max 7x7': 'big max 7x7',
'bathtub faucethandle replacement parts': 'bathtub faucet handle replacement parts',
'prelit spiral trees': 'pre lit spiral trees',
'12 sthel chainsaws': '12 stihl chainsaws',
'10 ft drain house': '10 ft drain hose',
'american standard tiolet flappers': 'american standard toilet flappers',
'solar out doors post lights': 'solar outdoor post lights',
'kitchen cabinet with counertop': 'kitchen cabinet with countertop',
'Painting Cabniets': 'Painting Cabinets',
'18x18 teracota porcelain floor tiles': '18x18 terracotta porcelain floor tiles',
'drywal': 'drywall',
'pencle trim tile': 'pencil trim tile',
'vinyl latice': 'vinyl lattice',
'angle findeer': 'angle finder',
'laminate tile comercial': 'laminate tile commercial',
'couner deep refrigerators': 'counter deep refrigerators',
'chritmas tree': 'christmas tree',
'plug in carbon monoxcide': 'plug in carbon monoxide',
'cabinet handels': 'cabinet handles',
'frigidair drop in': 'frigidaire drop in',
'7\' hex hed bolt': '7\' hex head bolt',
'vent fllters': 'vent filters',
'horizontall': 'horizontal',
'3 x 6 blace tile': '3 x 6 black tile',
'rostoluem spray paint': 'rustoleum spray paint',
'power drill battery an charger': 'power drill battery and charger',
'rayobi blue charger': 'ryobi blue charger',
'robyi': 'ryobi',
'5/4 pressure treaded decking': '5/4 pressure treated decking',
'white carrara herring bome': 'white carrara herringbone',
'sailr blue': 'sailor blue',
'charbroil classic': 'char broil classic',
'14 electric concrete saw with vc-u dch300': '14 electric concrete saw with vac-u dch 300',
'potable air conditioners': 'portable air conditioners',
'fin heating tubeing': 'fin heating tubing',
'fine/line baseboarrd': 'fine/line baseboard',
'hot water heating eliment': 'hot water heating element',
'toiet': 'toilet',
'hole house fan': 'whole house fan',
'montaga bay tile': 'montego bay tile',
'40 gal liquid propan': '40 gal liquid propane',
'4 x 4 pos cap': '4x4 post cap',
'white quartz cointertop': 'white quartz countertop',
'elongated bone toilest': 'elongated bone toilet',
'white acryl paint': 'white acrylic paint',
'foundstion vents': 'foundation vents',
'sqeaky carpet stair kit': 'squeaky carpet stair kit',
'defusiers for floors': 'diffusers for floors',
'8\' galvanized roll top edginh': '8\' galvanized roll top edging',
'marithon water heater element': 'marathon water heater element',
'wirerless light switch': 'wireless light switch',
'moen posi-temp tim kit': 'moen posi-temp trim kit',
'shower dooroil rubbed bronze': 'shower door oil rubbed bronze',
'wireing': 'wiring',
'kitchen aid architecs series 11': 'kitchenaid architect series 11',
'wall oven combon': 'wall oven combo',
'survival babkpack': 'survival backpack',
'wire dstaples': 'wire staples',
'4in drain gratewhite': '4in drain grate white',
'shitch cover': 'switch cover',
'vitarera quartz': 'viatera quartz',
'5/8-in masonary drill bit': '5/8-in masonry drill bit',
'brinkman grill grates': 'brinkmann grill grates',
'pest repellant': 'pest repellent',
'bathun drain plunger': 'bathtub drain plunger',
'incounter gas cook range': 'encounter gas cook range',
'peat moss bails': 'peat moss bales',
'3-piece bath accessory kit in chrom': '3-piece bath accessory kit in chrome',
'alameda hickey laminate': 'alameda hickory laminate',
'flooring moisture barier': 'flooring moisture barrier',
'vinylcove base': 'vinyl cove base',
'ge diswasher': 'ge dishwasher',
'b10 led bub': 'b10 led bulb',
'cub cadetcordless hedge trimmer': 'cub cadet cordless hedge trimmer',
'hampton bay jewelery armoire wht': 'hampton bay jewelry armoire white',
'perenials': 'perennials',
'heat ventss': 'heat vents',
'mobil home glass door': 'mobile home glass door',
'lamanet floor cutter': 'laminate floor cutter',
'on off valvefor tub faucet': 'on off valve for tub faucet',
'assie grill fire and ash': 'aussie grill fire and ash',
'hanging worklight fixtures ceiling': 'hanging work light fixtures ceiling',
'20 amp tamper resitance duplex receptacle': '20 amp tamper resistant duplex receptacle',
'liqwuid nail': 'liquid nail',
'1/2 tee pvcp': '1/2 tee pvc',
'toilet repair kit cadet 3 flowise 2-piece 1.28 gpf round fro': 'toilet repair kit cadet 3 flowise 2-piece 1.28 gpf round front',
'50 amp turn look plug': '50 amp turn lock plug',
'6x6 colunm caps': '6x6 column caps',
'12 valleta': '12 valletta',
'pellitized lime': 'pelletized lime',
'concrete sonic tub': 'concrete sonic tube',
'110 air conditior an heat': '110 air conditioner and heat',
'what is best for settingfence posts in soil?': 'what is best for setting fence posts in soil?',
'washer dryer folding worksurface': 'washer dryer folding work surface',
'outdoor spigot spliter': 'outdoor spigot splitter',
'alumiunm gate': 'aluminum gate',
'lawm mower': 'lawn mower',
'door floor plate slideing doors': 'door floor plate sliding doors',
'akkegro': 'allegro',
'wead burner': 'weed burner',
'galvinized nails 3': 'galvanized nails 3',
'artifical turf border': 'artificial turf border',
'oppeuss light trim ring': 'oppeus light trim ring',
'12 ft john boat': '12ft jon boat',
'outdoor coucg': 'outdoor couch',
'drywall panel hoisst': 'drywall panel hoist',
'ego hainsaw': 'ego chainsaw',
'hibascus plant': 'hibiscus plant',
'pullbehind fertilizer spreader': 'pull behind fertilizer spreader',
'door latch uard': 'door latch guard',
'water suppy box': 'water supply box',
'octagon eve vents': 'octagon eave vents',
'el ctrical s ez': 'electrical sez',
'varnishe': 'varnish',
'klien rg6': 'klein rg6',
'floor matt': 'floor mat',
'60 shower ddor': '60 shower door',
'blue tapeexhaust fan/light': 'blue tape exhaust fan/light',
'rocks hydrophonics': 'rocks hydroponics',
'mesquito spray': 'mosquito spray',
'alumiun grove in': 'aluminum grove in',
'lithonia outdoor wall paks': 'lithonia outdoor wall packs',
'60 in. shower door brushed nicker': '60 in. shower door brushed nickel',
'makit 12v': 'makita 12v',
'black and yellow non skip tape': 'black and yellow non skid tape',
'skylifghts': 'skylights',
'led hale gin g9': 'led halogen g9',
'electrical pipe flexable': 'electrical pipe flexible',
'emt stroas': 'emt straps',
'ridged 1 emt conduit': 'rigid 1 emt conduit',
'baliey window roller shades': 'bailey window roller shades',
'hampton bay reswood valley 5 pc patio seating set with fire': 'hampton bay redwood valley 5 pc patio seating set with fire',
'lawn grass catchbag': 'lawn grass catcher bag',
'1/4 lauwan under layment': '1/4 lauan underlayment',
'window tintinig': 'window tinting',
'4 inch round bellbox cover': '4 inch round bell box cover',
'vinal latice fence': 'vinyl lattice fence',
'solar pest repelers': 'solar pest repellers',
'barn doorspring latches': 'barn door spring latches',
'3 gauge copper phhn': '3 gauge copper thhn',
'three wire hottube': 'three wire hot tub',
'shope cloths': 'shop clothes',
'bbostitch tool set': 'bostitch tool set',
'outdoor hightop dining': 'outdoor high top dining',
'delata raincan': 'delta raincan',
'soap wash maching tilde': 'soap wash machine tilde',
'16 ftdecking boards': '16 ft decking boards',
'1 amp receptical': '1 amp receptacle',
'outdoor gfi': 'outdoor gfci',
'bbq burner replacment': 'bbq burner replacement',
'levin 25 wat usb': 'levin 25 watt usb',
'delta diverte rhandle in rb': 'delta diverter handle in rb',
'3 pane craftsman door': '3 panel craftsman door',
'charolettetown': 'charlottetown',
'raised toelit sseat': 'raised toilet seat',
'webber spirit gas grill': 'weber spirit gas grill',
'adapter for extention cord': 'adapter for extension cord',
'bathrub and shower wall kits': 'bathtub and shower wall kits',
'sofit vents 4x16': 'soffit vents 4 x 16',
'1/2 inch isp water supply line': '1/2 inch ips water supply line',
'eurothem thermostatic valve': 'eurotherm thermostatic valve',
'plactic totes 36 inches wide': 'plastic totes 36 inches wide',
'pest control diat': 'pest control diet',
'black cobwoys star': 'black cowboys star',
'whirpool oven 5.1': 'whirlpool oven 5.1',
'min fridges for campers': 'mini fridges for campers',
'howards restore a finish': 'howards restor a finish',
'ge just cut fraiser fur': 'ge just cut fraser fir',
'25 watt warmlight bulb': '25 watt warm light bulb',
'kichen island': 'kitchen island',
'duel mount stainless steel sinks': 'dual mount stainless steel sinks',
'home sevalance cameras': 'home surveillance cameras',
'marbel vinyl tile': 'marble vinyl tile',
'30 entry door 9 litr': '30 entry door 9 lite',
'roxul sale n sound': 'roxul safe n sound',
'4 guage use': '4 gauge use',
'jigsaw tblades': 'jigsaw t blades',
'jigsaww blades': 'jigsaw blades',
'clawfoot tub cutain': 'clawfoot tub curtain',
'raised garden ed': 'raised garden bed',
'58.75x80 sliding glass door': '58.75x 80 sliding glass door',
'1/4 nich tee': '1/4 inch tee',
'alluminun wire splice': 'aluminum wire splice',
'2 sheet metal screrw': '2 sheet metal screw',
'non electically conductive epoxy': 'non electrically conductive epoxy',
'led fluoreecent light replacement': 'led fluorescent light replacement',
't8 8 ft 4-light flourescent fixture': 't8 8 ft 4-light fluorescent fixture',
'othor ant killer': 'ortho ant killer',
'spectacide for lawnscarpenter ants': 'spectracide for lawns carpenter ants',
'ccurved shower door': 'curved shower door',
'4in pvc electrcial boxes': '4in pvc electrical boxes',
'hampton bay fan replacemtn': 'hampton bay fan replacement',
'6\' remodel can valted celing cans': '6\' remodel can vaulted ceiling cans',
'roman tub faucers': 'roman tub faucets',
'flourescent paint by rustoleum': 'fluorescent paint by rustoleum',
'hidden fastners': 'hidden fasteners',
'otdoor sola': 'outdoor solar',
'solar post l8ghts': 'solar post lights',
'plus 3 tintet': 'plus 3 tinted',
'barbeque tools': 'barbecue tools',
'circular flourecent lights': 'circular fluorescent lights',
'rain barrells': 'rain barrels',
'gagarage storage cabinets': 'garage storage cabinets',
'brown blasplash tile': 'brown backsplash tile',
'evap cooler theromsat': 'evap cooler thermostat',
'undergroud telephone wire': 'underground telephone wire',
'cop mail adapter': 'cop male adapter',
'set crews for glass': 'set screws for glass',
'roybi lazer circular saw': 'ryobi laser circular saw',
'walnuit stain': 'walnut stain',
'ruber door extension': 'rubber door extension',
'home decorators cinamon': 'home decorators cinnamon',
'apoxy patch': 'epoxy patch',
'batroom fan heater light': 'bathroom fan heater light',
'commercial radient ceiling heaters': 'commercial radiant ceiling heaters',
'surveilance camera': 'surveillance camera',
'tub facet set': 'tub faucet set',
'solistone pebbble': 'solistone pebble',
'1 1/4 galvenized steel pipe fittings': '1 1/4 galvanized steel pipe fittings',
'22.4 cubit feet refrigerator': '22.4 cubic feet refrigerator',
'behr premium plus ultrta': 'behr premium plus ultra',
'autoficial grass': 'artificial grass',
'huskey scocket set': 'husky socket set',
'husky black toll boxes': 'husky black tool boxes',
'isunderlayment requiered for metal roof': 'is underlayment required for metal roof',
'safety glass with perscription': 'safety glass with prescription',
'polished brass 8 spread lavitory faucet': 'polished brass 8 spread lavatory faucet',
'heat only therostats': 'heat only thermostats',
'65 watt dim able': '65 watt dimmable',
'1-1/4 pocket hole screwsw': '1-1/4 pocket hole screws',
'wwod floor runner': 'wood floor runner',
'bostic wood floor glue': 'bostik wood floor glue',
'hand shovles': 'hand shovels',
'garage orgnize': 'garage organizer',
'diamond plate storge unit': 'diamond plate storage unit',
'silcone': 'silicone',
'packing suplies': 'packing supplies',
'ridgid planner': 'ridgid planer',
'shower fiberglas': 'shower fiberglass',
'curtain rod wrp': 'curtain rod wrap',
'fire place accessories gas loggs': 'fireplace accessories gas logs',
'recesseingd light housing': 'recessed light housing',
'100 amps circuit braker': '100 amps circuit breaker',
'delta satin nickle shower systems': 'delta satin nickel shower systems',
'auqatic shower & bath': 'aquatic shower',
'termini mosquito garlic spray': 'terminix mosquito garlic spray',
'arbourist safety climbing belt': 'arborist safety climbing belt',
'vynal wood fence': 'vinyl wood fence',
'acrylic primere': 'acrylic primer',
'20\' facia board': '20\' fascia board',
'17 1/2 high tolite': '17 1/2 high toilet',
'howard restore a finish': 'howard restor a finish',
'tub enclouseure with tub': 'tub enclosure with tub',
'leaf guards for stomr windows': 'leaf guards for storm windows',
'sliding tub soors': 'sliding tub doors',
'amdry wallpanel': 'amdry wall panel',
'22.1 refrierator': '22.1 refrigerator',
'fram boxes': 'frame boxes',
'patio tbricks': 'patio bricks',
'6 foot treshold': '6 foot threshold',
'florencet light cover': 'fluorescent light cover',
'taracota drain pan': 'terracotta drain pan',
'smaller single deadbolt lock': 'small single deadbolt lock',
'lmainate boards': 'laminate boards',
'acuria lattace panels': 'acurio lattice panels',
'adirondeck cusion': 'adirondack cushion',
'oscilating fan': 'oscillating fan',
'washing machine plug adapator': 'washing machine plug adapter',
'concrette pier': 'concrete pier',
'southren gray tile': 'southern gray tile',
'dealt portable table saw table': 'dewalt portable table saw table',
'matte heat resistant pain': 'matte heat resistant paint',
'White Temper Resistant Duplex Outlet': 'White Tamper Resistant Duplex Outlet',
'screws for deckin': 'screws for decking',
'20 gl. hose end sprayer': '20 gal. hose end sprayer',
'sliding door storage cabi nets': 'sliding door storage cabinets',
'tinted masonary sealer': 'tinted masonry sealer',
'kids toilet seateat': 'kids toilet seat eat',
'anderson storm door screen roller': 'andersen storm door screen roller',
'vaccuum cleaners for hardwood and carpet': 'vacuum cleaners for hardwood and carpet',
'copper baluseter': 'copper baluster',
'aluninion circular blade': 'aluminium circular blade',
'ceiling light nickle 2-light': 'ceiling light nickel 2-light',
'adirondac, patio chair': 'adirondack, patio chair',
'flourescent tube': 'fluorescent tube',
'polyurethane adhesiv': 'polyurethane adhesive',
'extirior clear spray paint': 'exterior clear spray paint',
'outdoor faucwts': 'outdoor faucets',
'asphaul based coating': 'asphalt based coating',
'3/8 couipling': '3/8 coupling',
'2x4x10 pressure treater': '2x4x10 pressure treated',
'koehler faucet': 'kohler faucet',
'led rop light clips': 'led rope light clips',
'square d double brakers': 'square d double breakers',
'30 inchesbathroom vanity': '30 inches bathroom vanity',
'1/2 \' copper fiting': '1/2 \' copper fitting',
'capital cap for colum': 'capital cap for column',
'grass turf pavewrs': 'grass turf pavers',
'lowvoltage indoor accent lights': 'low voltage indoor accent lights',
'dremel minimate cordless moto tool': 'dremel minimite cordless moto tool',
'96 right hand miter tyhoon ice': '96 right hand miter typhoon ice',
'magnet base tool loight': 'magnetic base tool light',
'robi 18v saw': 'ryobi 18v saw',
'5 light hanging chandielier': '5 light hanging chandelier',
'Moem faucet repair': 'Moen faucet repair',
'3x6 daltile white 101 kohler': '3x6 daltile white k101 kohler',
'lock cmbo': 'lock combo',
'trimmer/edger\'s, gas powered': 'trimmer/edgers, gas powered',
'generaor for fridge': 'generator for fridge',
'led light bulbs dimable spot': 'led light bulbs dimmable spot',
'outdoor seatting cushions': 'outdoor seating cushions',
'full size frigde': 'full size fridge',
'ASHPHALT SEALER': 'ASPHALT SEALER',
'behr ultra pint': 'behr ultra paint',
'emparador mosaic bamboo brick': 'emperador mosaic bamboo brick',
'bath mirror cabintes': 'bath mirror cabinets',
'floor squeege': 'floor squeegee',
'squeege': 'squeegee',
'allure golden oaksku579331': 'allure golden oak sku 579331',
'artificial turf for petrs': 'artificial turf for pets',
'8 foot florescent light bulb': '8 foot fluorescent light bulb',
'3x3 diamond thread plate': '3x3 diamond tread plate',
'handical rail': 'handicap rail',
'moen grab bar securemount': 'moen grab bar secure mount',
'ceiling mount electical box': 'ceiling mount electrical box',
'stainless steal hose clamps': 'stainless steel hose clamps',
'sod grass san agustino': 'sod grass san agustin',
'bateries 9v': 'batteries 9v',
'kohler brushed nickle framless shower doors': 'kohler brushed nickel frameless shower doors',
'mirro shower doors': 'mirror shower doors',
'daylillies': 'daylilies',
'fridgedaire fridge': 'frigidaire fridge',
'storage buiding 12\' x 20\'': 'storage building 12\' x 20\'',
'pvc valvez': 'pvc valves',
'socket magnectic extension': 'socket magnetic extension',
'shop vac aacessories': 'shop vac accessories',
'roll jp door': 'roll up door',
'rollup door': 'roll up door',
'steibler eltron': 'stiebel eltron',
'liquid itght non metalic': 'liquid tight non metallic',
'metalic lquid tight': 'metallic liquid tight',
'22 bin plastic drawer parts storage organiz': '22 bin plastic drawer parts storage organizer',
'marroon roof screws': 'maroon roof screws',
'battery opererated lighting': 'battery operated lighting',
'roybi pop up': 'ryobi pop up',
'connectorv 30': 'connector 30',
'ge gfi braker 30amp': 'ge gfci breaker 30 amp',
'pipe swer': 'pipe sewer',
'treaded pvc pipe fitting': 'threaded pvc pipe fitting',
'cornewr bathtub': 'corner bathtub',
'whirlpool apron bathtup': 'whirlpool apron bathtub',
'veranda facia': 'veranda fascia',
'rrecessed light trim ring': 'recessed light trim ring',
'1 light steele sconce': '1 light steel sconce',
'7\' 90 elboq': '7\' 90 elbow',
'drawer guides and slides': 'drawer glides and slides',
'christmsa dog': 'christmas dog',
'light weight coccrete': 'lightweight concrete',
'hardwoo flooring 2 1/4 in': 'hardwood flooring 2 1/4 in',
'garden hose filter attactchent': 'garden hose filter attachment',
'milwaukie saw blades': 'milwaukee saw blades',
'dewalt extention cord': 'dewalt extension cord',
'hampton bay high gloss jabot laminate': 'hampton bay high gloss jatoba laminate',
'20v blacker and decker charger': '20v black and decker charger',
'15 water depth bathub': '15 water depth bathtub',
'magnetized wall covering': 'magnetic wall covering',
'fire brick and morter': 'fire brick and mortar',
'anderson french wood patio door 400 series': 'andersen frenchwood patio door 400 series',
'outdoor baners': 'outdoor banners',
'osciallating blade to cut tile': 'oscillating blade to cut tile',
'one way valae': 'one way valve',
'black decker matris': 'black decker matrix',
'makita skill saw': 'makita skil saw',
'tuscon patio pavers': 'tucson patio pavers',
'plastic florring': 'plastic flooring',
'fungicidal seed innoculant': 'fungicidal seed inoculant',
'pcv coated hardware cloth': 'pvc coated hardware cloth',
'2x2 ceiling tilepantq22s': '2x2 ceiling tile paint 22s',
'rectangulat wihite ceramic sink bathroom': 'rectangular white ceramic sink bathroom',
'battery operataed wall light': 'battery operated wall light',
'72 inchtrack light': '72 inch track light',
'suny citrus fertilizer': 'sunny citrus fertilizer',
'48 inch aluminum shower curtin rod': '48 inch aluminum shower curtain rod',
'dehumidifyer': 'dehumidifier',
'earthquaike': 'earthquake',
'phillips led sparkle light bulbs': 'philips led sparkle light bulbs',
'metalic silver spray': 'metallic silver spray',
'all retaing wall': 'all retaining wall',
'high temperate sealant': 'high temperature sealant',
'greecian white porcelein marble': 'greecian white porcelain marble',
'shelves stailess stel': 'shelves stainless steel',
'wallmounted garage shelves': 'wall mounted garage shelves',
'remote meat thermom': 'remote meat thermometer',
'pvc threaded elbo': 'pvc threaded elbow',
'summit 20 in elctric range': 'summit 20 in electric range',
'groung fault electric outlet': 'ground fault electrical outlet',
'prenneols flower seeds': 'perennials flower seeds',
'hyrdaulic oil for kohler': 'hydraulic oil for kohler',
'hot/cold porcelin handles': 'hot/cold porcelain handles',
'white vanites with tops': 'white vanities with tops',
'exterier door keypad': 'exterior door keypad',
'purpor power': 'purple power',
'automatic drower closer': 'automatic drawer closer',
'potable firepace': 'portable fireplace',
'azelas': 'azaleas',
'mta distributions log splitter': 'mta distributors log splitter',
'standing town rack': 'standing towel rack',
'zinser stain cover': 'zinsser stain cover',
'weed trimer push type': 'weed trimmer push type',
'centipe grass seed': 'centipede grass seed',
'36 curved showered curtain rod': '36 curved shower curtain rod',
'4 quck grip 101': '4 quick grip 101',
'metal gringing weel 5/8': 'metal grinding wheel 5/8',
'weelbarrow': 'wheelbarrow',
'baraar emy': 'bazaar emy',
'wetbar sink and faucet': 'wet bar sink and faucet',
'perenial flowers': 'perennial flowers',
'infred turkey fryer': 'infrared turkey fryer',
'oil rubbed bronse bathroom lighting': 'oil rubbed bronze bathroom lighting',
'solor power lighting for exterior': 'solar power lighting for exterior',
'infloor heating antifreeze': 'in floor heating antifreeze',
'galvinized conduit pipe': 'galvanized conduit pipe',
'double curtain rod connecter': 'double curtain rod connector',
'drop cieling tiles 2ft by 4 ft': 'drop ceiling tiles 2ft by 4ft',
'plug in led night lite photocell': 'plug in led night light photocell',
'rough limber': 'rough lumber',
'48x48 windoww': '48x48 window',
'high intensity t5 flourescent lights': 'high intensity t5 fluorescent lights',
'brinly hardy 40 inc tow behind': 'brinly hardy 40 inch tow behind',
'ornge 5x7 rugs': 'orange 5x7 rugs',
'kitchenmaid built-in double drawer': 'kitchenaid built-in double drawer',
'safety latter': 'safety ladder',
'blind replacemetn': 'blind replacement',
'stainless steeel collated nails': 'stainless steel collated nails',
'hang rials barnyard doors': 'hang rails barnyard doors',
'tall black toliet': 'tall black toilet',
'fint tube': 'find tube',
'24 inches rerefrigerator': '24 inches refrigerator',
'ge microwave wall oven comb': 'ge microwave wall oven combo',
'presure treated': 'pressure treated',
'husky 46 9 drawer mobil': 'husky 46 9 drawer mobile',
'apartment size ge refrigertor stainless steel': 'apartment size ge refrigerator stainless steel',
'penedtrating stain': 'penetrating stain',
'briggsstraton 11 horse air filter': 'briggs stratton 11 horse air filter',
'hoovwe cordless vacuum cleaners': 'hoover cordless vacuum cleaners',
'tumbler dryer hose and claps': 'tumble dryer hose and clamps',
'antique truch': 'antique truck',
'hohler black and tan': 'kohler black and tan',
'spray and forget house nad deck': 'spray and forget house and deck',
'apriaire humidifier water panel': 'aprilaire humidifier water panel',
'unsanded groutr': 'unsanded grout',
'60 wat soft watt 2700k a19 dimibal led': '60 watt soft watt 2700k a19 dimmable led',
'7.5 mconnection for 9000 btu': '7.5 connection for 9000 btu',
'dimer switch and fan control': 'dimmer switch and fan control',
'granitecounter top cararra': 'granite countertop carrara',
'20 amp decor outlet ivory': '20 amp decora outlet ivory',
'rock wall papper': 'rock wallpaper',
'thin set fray': 'thin set gray',
'glass mirrior doors 72x80': 'glass mirror doors 72x80',
'heirloom whie': 'heirloom white',
'wood shelfing': 'wood shelving',
'kohler top mont bathroom sink': 'kohler top mount bathroom sink',
'outdoor dust to dawn light': 'outdoor dusk to dawn light',
'windowbalance': 'window balance',
'gunstock oak liamate': 'gunstock oak laminate',
'gardden benches': 'garden benches',
'strended electrical wire': 'stranded electrical wire',
'counter refinsher': 'counter refinishing',
'unfinished wood p-lant stand': 'unfinished wood plant stand',
'celing fan 60': 'ceiling fan 60',
'porta nailor': 'porta nailer',
't fittin': 't fitting',
'bousch lazer level gll2-80p': 'bosch laser level gll2-80p',
'2 1/2 inch nail boxe': '2 1/2 inch nail box',
'bonda body filler': 'bondo body filler',
'window manganetic lock': 'window magnetic lock',
'cat 5 cable uv restance': 'cat 5 cable uv resistance',
'3 4 toilet phlange': '3 4 toilet flange',
'aa batteried': 'aa batteries',
'6 pvc flixible coupling pipe': '6 pvc flexible coupling pipe',
'7 footaluminum awning': '7 foot aluminum awning',
'carburator': 'carburetor',
'water mainfold': 'water manifold',
'kholer bathroom wall lights': 'kohler bathroom wall lights',
'toro belt pully': 'toro belt pulley',
'paper lawn tefuse bags': 'paper lawn refuse bags',
'wadrobe moving boxes': 'wardrobe moving boxes',
'ultra clarifer, pool': 'ultra clarifier, pool',
'trash caninet slide': 'trash cabinet slide',
'craftig pvc cabinets': 'crafting pvc cabinets',
'plastic organozers': 'plastic organizers',
'rj45 crinp tool': 'rj45 crimp tool',
'darby 18 inch dishwasher': 'danby 18 inch dishwasher',
'10 x 10 gaxebo garden house': '10x10 gazebo garden house',
'colonial caseing': 'colonial casing',
'tarp for outsid furniture': 'tarp for outside furniture',
'phlne batteries': 'phone batteries',
'eatrhwise mower blades': 'earthwise mower blades',
'outdoor artifical lawn': 'outdoor artificial lawn',
'dual mount porcelin kitchen sinks': 'dual mount porcelain kitchen sinks',
'sflexible shower': 'flexible shower',
'savfavieh rug pad': 'safavieh rug pad',
'tigerwood perigo laminate flooring': 'tigerwood pergo laminate flooring',
'2\' flourescent lighting': '2\' fluorescent lighting',
'concerte stair railings': 'concrete stair railings',
'indoor infered heaters': 'indoor infrared heaters',
'tensil ties': 'tinsel ties',
'20 ampweather proof recepticles': '20 amp weatherproof receptacles',
'hdmi cabl': 'hdmi cable',
'matage double oven ranges': 'maytag double oven ranges',
'navarra sierra passage doorknob set': 'navarra sierra passage door knob set',
'outdoor furniture cover martha steward': 'outdoor furniture cover martha stewart',
'divonshire': 'devonshire',
'marine grade painr': 'marine grade paint',
'counter and appliance gaperaser': 'counter and appliance gap eraser',
'whirpool range hood 36': 'whirlpool range hood 36',
'flourecent': 'fluorescent',
'drain spoutts': 'drain spouts',
'1/4 shut off velves': '1/4 shut off valves',
'porta cool': 'portacool',
'yard walll': 'yard wall',
'kohler elongaterd toilet seat': 'kohler elongated toilet seat',
'kohler lighted tolet seats': 'kohler lighted toilet seats',
'cree led bub 6-pack': 'cree led bulb 6-pack',
'concrere chisel': 'concrete chisel',
'pedistal sink, 27\'': 'pedestal sink, 27\'',
'florsent replacement diffuser': 'fluorescent replacement diffuser',
'chlorox': 'clorox',
'core aeretor': 'core aerator',
'water proofing connector': 'waterproof connector',
'washer/dryr': 'washer/dryer',
'cambria java refridgerator': 'cambria java refrigerator',
'decrotive metal deck rail incecerts': 'decorative metal deck rail inserts',
'whirl pool water heater pilot': 'whirlpool water heater pilot',
'siemens double pole gfi': 'siemens double pole gfci',
'hampton bay alenxander oak': 'hampton bay alexander oak',
'32 inchvinyl screen doors': '32 inch vinyl screen doors',
'hamptonbay shaker cabinets wall': 'hampton bay shaker cabinets wall',
'3/8 entension': '3/8 extension',
'10x12 outdoor gazabos': '10x12 outdoor gazebos',
'seet metal tools': 'sheet metal tools',
'boch gll': 'bosch gll',
'dealt 8v screwdriver': 'dewalt 8v screwdriver',
'hand heald showers and ada grab bars': 'hand held showers and ada grab bars',
'200 amp outdoor circut breaker panel': '200 amp outdoor circuit breaker panel',
'fingerprint lockset': 'fingerprint locks',
'weekender powerwasher extension arms': 'weekender power washer extension arms',
'makita drill batterie charger': 'makita drill battery charger',
'ridgid fan': 'rigid fan',
'swifer wet cloth': 'swiffer wet cloth',
'hot water recirculator': 'hot water recirculation',
'riding mower blabes': 'riding mower blades',
'chain sherpeners': 'chain sharpeners',
'relief valve for rudd hot water heater': 'relief valve for ruud hot water heater',
'ceiling light brackt': 'ceiling light bracket',
'perferated pipe': 'perforated pipe',
'bath room sink accecories': 'bathroom sink accessories',
'ding room set': 'dining room set',
'2 ton expoxy': '2 ton epoxy',
'cutkler hammer breaker': 'cutler hammer breaker',
'red color cauking': 'red color caulking',
'strap and t hindge': 'strap and t hinge',
'screw driver 10 iches': 'screwdriver 10 inches',
'shower glass slelves': 'shower glass shelves',
'playststion 4 destiny bundle': 'playstation 4 destiny bundle',
'air conditiooning filter 14\'': 'air conditioning filter 14\'',
'sliding reversable patio door': 'sliding reversible patio door',
'rust oleam pinters touch black': 'rust oleum painters touch black',
'apron sink firecaly two bowl': 'apron sink fireclay two bowl',
'condesate pump': 'condensate pump',
'bronze outdoor ceiling dan': 'bronze outdoor ceiling fan',
'8 guage wire': '8 gauge wire',
'capacitor for quaterhorse motor 110 volts': 'capacitor for quarter horse motor 110 volts',
'anderson storm doors antique bronze': 'andersen storm doors antique bronze',
'gas enthonal free': 'gas ethanol free',
'is item at homedop': 'is item at home depot',
'drain stopper exstension': 'drain stopper extension',
'no tresspassing': 'no trespassing',
'100 gallon storage ben': '100 gallon storage bin',
'paint hardner': 'paint hardener',
'mystick permanent adhesive value pack': 'mystik permanent adhesive value pack',
'clear vlyvynal an rolls': 'clear polyvinyl and rolls',
'kliz primers': 'kilz primers',
'one way scrue removal tool': 'one way screw removal tool',
'stainless dishwaser smugde proof': 'stainless dishwasher smudge proof',
'hex shank drill bitt sets': 'hex shank drill bit sets',
'3.9 high effeciency front load washer': '3.9 high efficiency front load washer',
'concret patio floor': 'concrete patio floor',
'in the ground rodiron plant hanger': 'in the ground rod iron plant hanger',
'anderson storm door series 2500 sandtone polished brass': 'andersen storm door series 2500 sandstone polished brass',
'stainless steele screws': 'stainless steel screws',
'spray sealent for showers': 'spray sealant for showers',
'split line air conditioing': 'split line air conditioning',
'water softner pellet': 'water softener pellet',
'shelac': 'shellac',
'helti tools': 'hilti tools',
'PHILLIPS POST LIGHT BULB': 'PHILIPS POST LIGHT BULB',
'post light bulbl': 'post light bulb',
'tiolet': 'toilet',
'indoor home decor raindeer': 'indoor home decor reindeer',
'dinning tables': 'dining tables',
'patio dinning tables': 'patio dining tables',
'dremel router acessary': 'dremel router accessory',
'accordion door harware': 'accordion door hardware',
'edget tape': 'edge tape',
'verneer edging tool': 'veneer edging tool',
'drywall fastner': 'drywall fastener',
'heat pump acessories': 'heat pump accessories',
'scroll saw spirsl blade': 'scroll saw spiral blade',
'kitchen mat boack': 'kitchen mat black',
'chamberlain chain and pulliepaarts': 'chamberlain chain and pulley parts',
'swivle fitting for gas': 'swivel fitting for gas',
'SOLDERING IRORN': 'SOLDERING IRON',
'oaint marker': 'paint marker',
'upsidedowncan marker paint': 'upside down can marker paint',
'rope chritsmas lights': 'rope christmas lights',
'shower curtin rod': 'shower curtain rod',
'scoaring pads': 'scouring pads',
'spring set for price fister': 'spring set for price pfister',
'laquer thinner': 'lacquer thinner',
'mout faucet water filter': 'mount faucet water filter',
'NEUMATIC DOOR ARM': 'PNEUMATIC DOOR ARM',
'ceiling tile square fotage': 'ceiling tile square footage',
'ne angle base': 'neo angle base',
'1/4 in.-20 x 1 in. stainless steel flat-head socket cap scre': '1/4 in.-20 x 1 in. stainless steel flat-head socket cap screw',
'flexable pipe for propane': 'flexible pipe for propane',
'daltile accent peices': 'daltile accent pieces',
'specticide weed and grass rtu refill': 'spectracide weed and grass rtu refill',
'wood ddeck kits': 'wood deck kits',
'closetmaid hang9ing shelf': 'closetmaid hanging shelf',
'asb shower with curtian': 'asb shower with curtain',
'ptouch labeling tape': 'p touch labeling tape',
'misquito': 'mosquito',
'yard fooger': 'yard fogger',
'plastic splash guarf': 'plastic splash guard',
'3 light celling mount': '3 light ceiling mount',
'textered wallpaper': 'textured wallpaper',
'thermostat w remote senser': 'thermostat w remote sensor',
'spray oil prier': 'spray oil primer',
'maxx shower door': 'maax shower door',
'corion shower base': 'corian shower base',
'stapler hammers': 'staple hammers',
'2in non metalic standing coupling': '2in non metallic standing coupling',
'backyard xs capes': 'backyard xscapes',
'kraylon non skid': 'krylon non skid',
'pendent lights wit conversion kits': 'pendant lights with conversion kits',
'american wood charllotesville natural hickory': 'american wood charlottesville natural hickory',
'1/0 aqg': '1/0 awg',
'artci shag rug': 'arctic shag rug',
'omen single hole bathroom faucet': 'moen single hole bathroom faucet',
'john deere d100 sereissnow blade': 'john deere d100 series snow blade',
'brownbrick wallpaper': 'brown brick wallpaper',
'clear corrougated sheets': 'clear corrugated sheets',
'pressuer control valve': 'pressure control valve',
'white acryllic sheet': 'white acrylic sheet',
'wg307work jaw saw': 'wg307 worx jawsaw',
'plaskolight ceiling panel': 'plaskolite ceiling panel',
'charger y maintainer': 'charger and maintainer',
'waterless urinal conversion kist': 'waterless urinal conversion kit',
'hot water heating recirculitating pumps': 'hot water heater recirculating pumps',
'two gang carlton switch red dpt': 'two gang carlton switch red dot',
'kohler shower cartidges': 'kohler shower cartridges',
'rigid portable tool boxes': 'ridgid portable tool boxes',
'magniflier lamp': 'magnifier lamp',
'irragation controler': 'irrigation controller',
'minala rope': 'manila rope',
'wood sculture tool': 'wood sculpture tool',
'combination fan and lightwall switches': 'combination fan and light wall switches',
'acid stian': 'acid stain',
'bathtub deck mouted faucet with sprayer': 'bathtub deck mounted faucet with sprayer',
'attachments for zero turn touro': 'attachments for zero turn toro',
'wood pellats for grills': 'wood pellets for grills',
'whirpool 7000 washer': 'whirlpool 7000 washer',
'kitchenover sink lighting': 'kitchen over sink lighting',
'pegasus antique black side spalsh': 'pegasus antique black side splash',
'lock tight pl': 'loctite pl',
'landscasping ms international polish black stone': 'landscaping ms international polish black stone',
'1.4 cubit ft micro wave': '1.4 cubic ft microwave',
'square soffet vents': 'square soffit vents',
'exterior for pastic shutters': 'exterior for plastic shutters',
'exterior hous shutters': 'exterior house shutters',
'nutone ventiliation fan parts': 'nutone ventilation fan parts',
'belt anf tie rack': 'belt and tie rack',
'no elecetrity lights': 'no electricity lights',
'merola porcelain mosiac': 'merola porcelain mosaic',
'knotches': 'notches',
'savavieh soho': 'safavieh soho',
'double doors with security licks': 'double doors with security locks',
'glass tile backsp gpxtpnrf': 'glass tile backsp gpx pnrf',
'cabibet shelf pins': 'cabinet shelf pins',
'kolher repair': 'kohler repair',
'mantle brakets': 'mantle brackets',
'masonry painnt': 'masonry paint',
'muliti locks': 'multi locks',
'serger sewimg machine': 'serger sewing machine',
'mirror installation hardwawrd': 'mirror installation hardware',
'walnut porcelian': 'walnut porcelain',
'40 airens mulching kit': '40 ariens mulching kit',
'porcelaine cleaner': 'porcelain cleaner',
'monococcon 8x8 ceramic azuvi tile': 'monococcion 8x8 ceramic azuvi tile',
'black patioo set': 'black patio set',
'3/8 viyl j channel': '3/8 vinyl j channel',
'5/8 j chann': '5/8 j channel',
'home alerty': 'home alert',
'linen storage cabnit': 'linen storage cabinet',
'natur gas heat': 'natural gas heat',
'repacement toilet handle': 'replacement toilet handle',
'poyurethane clear satin': 'polyurethane clear satin',
'garbage desposal': 'garbage disposal',
'fire restaint paint': 'fire resistant paint',
'bathroom floting ball': 'bathroom floating ball',
'kitchen aid processer': 'kitchenaid processor',
'fire extinguishhers': 'fire extinguishers',
'trex fenc': 'trex fence',
'circular sawshop vac': 'circular saw shop vac',
'arylic wood paint': 'acrylic wood paint',
'appache mills plush tiles': 'apache mills plush tiles',
'phillips tuvpl-l 36': 'philips tuv pl-l 36',
'framed inerior door': 'framed interior door',
'end squicky floor': 'end squeaky floor',
'hoover prower scub deluxe': 'hoover power scrub deluxe',
'pernennial grass seed': 'perennial grass seed',
'phone linesplice connectors': 'phone line splice connectors',
'grow boz and pots': 'grow box and pots',
'organic leafgrow soil': 'organic leaf grow soil',
'6 foot pation table': '6 foot patio table',
'replacement patio unbrella pole': 'replacement patio umbrella pole',
'exteriro door 30 * 80': 'exterior door 30 * 80',
'oilrubbed bronze 3/8in riser': 'oil rubbed bronze 3/8in riser',
'latge storage containers': 'large storage containers',
'fridgidaire water filter': 'frigidaire water filter',
'sheeking for log cabin': 'seeking for log cabin',
'modern shower facuet': 'modern shower faucet',
'mirror, brushed nichel': 'mirror, brushed nickel',
'antic brass chandelier': 'antique brass chandelier',
'bufflo box wrench': 'buffalo box wrench',
'armstrong hardwood flooring422250z5p': 'armstrong hardwood flooring 422250z5p',
'mixet math faucet': 'mixet bath faucet',
'24 port patch pane': '24 port patch panel',
'black postlantern': 'black post lantern',
'needel valve': 'needle valve',
'wood ballusters': 'wood balusters',
'sharkbite sprinler': 'sharkbite sprinkler',
'1/2 hp genie screw drive garage door openner': '1/2 hp genie screw drive garage door opener',
'black dimmable gimble lights': 'black dimmable gimbal lights',
'power gable mount attic fac': 'power gable mount attic fan',
'door threshholds': 'door thresholds',
'rubber office chair sweel': 'rubber office chair wheel',
'16x7 garage door sandtone': '16x7 garage door sandstone',
'dal tile 12x24 porcelaine black tile': 'daltile 12x24 porcelain black tile',
'non ferroue saw blade': 'non ferrous saw blade',
'aluminum three way swich': 'aluminum three way switch',
'racheting wrench': 'ratcheting wrench',
'shower wal hook': 'shower wall hook',
'inflatable pool pumper': 'inflatable pool pump',
'cub cadet 46 balde': 'cub cadet 46 blade',
'spade terminalsnylon insulated': 'spade terminals nylon insulated',
'jimmyproof lock': 'jimmy proof lock',
'braSS pie fittings': 'braSS pipe fittings',
'brushed nichol hanging lights': 'brushed nickel hanging lights',
'lockbox keydoor lock': 'lockbox key door lock',
'white cabnet 30 inch base': 'white cabinet 30 inch base',
'ryobi replacemet batteries': 'ryobi replacement batteries',
'bath bord': 'bath board',
'aerp garden': 'aerogarden',
'white sign lettters': 'white sign letters',
'sqaure vessel sink': 'square vessel sink',
'i beam brackest': 'i beam brackets',
'paint for aluminun siding': 'paint for aluminum siding',
'digital temp monotor': 'digital temp monitor',
'floatinf shelving': 'floating shelving',
'light buld for stinger zapper': 'light bulb for stinger zapper',
'custom counterto': 'custom countertop',
'replacement delta faucet cartrigdge': 'replacement delta faucet cartridge',
'laundry bnasket': 'laundry basket',
'air conditon cooper soft': 'air conditioner copper soft',
'wood qwik bolts': 'wood kwik bolts',
'bolt conrete anchors': 'bolt concrete anchors',
'outdoor dining se?': 'outdoor dining set?',
'glass sheet mosiacs': 'glass sheet mosaics',
'whites parkle': 'white sparkle',
'fiskers titanium 1 1/2 loppers': 'fiskars titanium 1 1/2 loppers',
'cement mason bit': 'cement masonry bit',
'bananna leaves plant': 'banana leaves plant',
'fi nish screws': 'finish screws',
'tolet handle left hand': 'toilet handle left hand',
'sika repair shp': 'sika repair shop',
'murry circuit breakers 20 amps': 'murray circuit breakers 20 amps',
'hand pipe theader': 'hand pipe threader',
'powermate walkbehind trimmer': 'powermate walk behind trimmer',
'metal clothes handing carts': 'metal clothes hanging carts',
'electric radiatior heat': 'electric radiator heat',
'shopvac filter hepa': 'shop vac filter hepa',
'hampton bay fenving': 'hampton bay fencing',
'knife sharppener': 'knife sharpener',
'atttic heat barrier': 'attic heat barrier',
'wondow curtains': 'window curtains',
'american standard town square widespread facet': 'american standard town square widespread faucet',
'5.0 chest freezerz': '5.0 chest freezers',
'20 amp surger protector': '20 amp surge protector',
'f 30 flourescent light fixture': 'f30 fluorescent light fixture',
'1/2 inch rubber lep tips': '1/2 inch rubber leg tips',
'threader rod end coupler': 'threaded rod end coupler',
'lamated counter tops': 'laminate countertops',
'railing kit system round ballusters': 'railing kit system round balusters',
'sintetic grass': 'synthetic grass',
'landry sink': 'laundry sink',
'solar led light dust to dawn': 'solar led light dusk to dawn',
'pegro xp coffee step': 'pergo xp coffee step',
'maytag two door refridgerator': 'maytag two door refrigerator',
'reprobramable combination lock': 'programmable combination lock',
'pnematic flooring nails 16 gauge': 'pneumatic flooring nailer 16 gauge',
'outide dog kennel': 'outside dog kennel',
'6 incn door knocker': '6 inch door knocker',
'non programmable vertical thermost': 'non programmable vertical thermostat',
'windser light coco': 'windsor light coco',
'cooling towes': 'cooling towers',
'glacier bay shower catridge': 'glacier bay shower cartridge',
'ge discontinnued top freezers': 'ge discontinued top freezers',
'security camaras': 'security cameras',
'toiles partes': 'toilet parts',
'pegasus ntique brass': 'pegasus antique brass',
'water pic shower head chrome': 'waterpik shower head chrome',
'85 gall tall 4500': '85 gal tall 4500',
'contempery ceiling fans': 'contemporary ceiling fans',
'toile seat lid': 'toilet seat lid',
'milwaukee noncontact tester': 'milwaukee non contact tester',
'emser ocuntry': 'emser country',
'front screen for a gazeebo': 'front screen for a gazebo',
'fatpack 18v': 'fat pack 18v',
'bathroom kraft made': 'bathroom kraftmaid',
'1/4 qk connect x 1/8 mip': '1/4 quick connect x 1/8 mip',
'plate for faucet stoper': 'plate for faucet stopper',
'femaie gas fitting quick disonnect': 'female gas fitting quick disconnect',
'recesse light bulbs': 'recessed light bulbs',
'3m 60926 vapor catridges': '3m 60926 vapor cartridges',
'weather strip for commerial door': 'weather strip for commercial door',
'arcadia mettal locks': 'arcadia metal locks',
'gekko gauges': 'gecko gauges',
'frigidaire water firlters': 'frigidaire water filters',
'30 par haolgen bulbs': '30 par halogen bulbs',
'red devil scraperreplacement bldes': 'red devil scraper replacement blades',
'gcfi outlet': 'gfci outlet',
'mohawk oak wood fllors': 'mohawk oak wood floors',
'all porpose stools': 'all purpose stools',
'primered floor molding': 'primed floor molding',
'glass cleaner concintrete': 'glass cleaner concentrate',
'30 amp surface mount recepticle': '30 amp surface mount receptacle',
'60 x 100 aluminun mesh': '60 x 100 aluminum mesh',
'tile border black and whit': 'tile border black and white',
'peir mount black': 'pier mount black',
'xtra wide baby gates': 'extra wide baby gates',
'roffing caulk': 'roofing caulk',
'1/2 inc pvc treaded connector': '1/2 inch pvc threaded connector',
'electric hock for lift': 'electric shock for lift',
'greak': 'greek',
'airfilter 20x24': 'air filter 20x24',
'extenion cord storage': 'extension cord storage',
'shluter': 'schluter',
'circular saw rrip fence': 'circular saw rip fence',
'HEATED TOLIET SEAT': 'HEATED TOILET SEAT',
'rount magnet': 'round magnet',
'handi cap sink faucett': 'handicap sink faucet',
'arc fault circute breaker 1pole 15 amp': 'arc fault circuit breaker 1 pole 15 amp',
'oreck full reease carpet cleaner': 'oreck full release carpet cleaner',
'min split mounting brackets': 'mini split mounting brackets',
'kholer sink 20x17': 'kohler sink 20x17',
'heavy duty extensoion cordyellow only': 'heavy duty extension cord yellow only',
'3 newll post': '3 newel post',
'veraluz 4 light bathroom vanity': 'varaluz 4 light bathroom vanity',
'anual combo': 'annual combo',
'ciling pan': 'ceiling pan',
'syllicone lube': 'silicone lube',
'hdx 20\' hight velocity floor fan': 'hdx 20\' high velocity floor fan',
'30 inch kitchenaide cooktops': '30 inch kitchenaid cooktops',
'kusshuln concrete mixer': 'kushlan concrete mixer',
'roles of concreate mesh': 'roles of concrete mesh',
'hardward for pull out waste bin': 'hardware for pull out waste bin',
'glass towel bar braket': 'glass towel bar bracket',
'living room cabnets': 'living room cabinets',
'1-1/4 extention pvc': '1-1/4 extension pvc',
'metal double gain boxes': 'metal double gang boxes',
'fabric umbella': 'fabric umbrella',
'club cadet 46 belt': 'cub cadet 46 belt',
'window air conditionerriding lawn mowers': 'window air conditioner riding lawn mowers',
'digital cammera': 'digital camera',
'prppane pan': 'propane pan',
'oride plant': 'pride plant',
'home decorator outoddor patio cordless shades': 'home decorator outdoor patio cordless shades',
'1x1 square tubeing': '1x1 square tubing',
'water filter for frigidaire refrigirator': 'water filter for frigidaire refrigerator',
'linier track pendant': 'linear track pendant',
'medal stud finder': 'metal stud finder',
'mke m12 heated hoddie kit': 'mke m12 heated hoodie kit',
'bilt in pool': 'built in pool',
'buit in shower base': 'built in shower base',
'grohsafe roughin valve 35015': 'grohsafe rough in valve 35015',
'tank insualation': 'tank insulation',
'khols double toilet bowl': 'kohl\'s double toilet bowl',
'atlantiic can racks': 'atlantic can racks',
'skylites': 'skylights',
'kwikset passive door knob': 'kwikset passage door knob',
'loadspeaker': 'loudspeaker',
'koehler enamel cast iron sink': 'kohler enameled cast iron sink',
'tood handle lock': 'todd handle lock',
'sable brow grout': 'sable brown grout',
'rewd bird feeder': 'red bird feeder',
'lilac aera rug': 'lilac area rug',
'lightsavannah 3-light burnished ing fixtures': 'light savannah 3-light burnished ing fixtures',
'clear vynil for patio': 'clear vinyl for patio',
'intersate battery': 'interstate battery',
'jeldewen prairie mission door': 'jeld wen prairie mission door',
'honey oak tmolding': 'honey oak t molding',
'COMPLET SHOWER KIT': 'COMPLETE SHOWER KIT',
'36\' florescent light bulb': '36\' fluorescent light bulb',
'melon sunbrellap': 'melon sunbrella',
'28 kg washign machine': '28 kg washing machine',
'metal trash cas': 'metal trash cans',
'front door with side transome': 'front door with side transom',
'tribecia': 'tribeca',
'exterior shutters byrgundy': 'exterior shutters burgundy',
'light switchvers for little girls': 'light switches for little girls',
'miraposa whirlpool tub': 'mariposa whirlpool tub',
'schoolhouse pendqnt light': 'schoolhouse pendant light',
'cablrail': 'cable rail',
'vinly seat cleaner': 'vinyl seat cleaner',
'metal 3 tiertrolley': 'metal 3 tier trolley',
'white pendant uplight': 'white pendant light',
'lbathroom vanity lights chrome 3': 'bathroom vanity lights chrome 3',
'brushed nickel knobw': 'brushed nickel knobs',
'Renassaince': 'Renaissance',
'simpon strong tie wedge': 'simpson strong tie wedge',
'silocone repairs': 'silicone repairs',
'chocolate brown blackspash': 'chocolate brown backsplash',
'portabel tabel, plastic': 'portable table, plastic',
'safavieh courtyard dark biege area rug': 'safavieh courtyard dark beige area rug',
'theromometer smart': 'thermometer smart',
'hummngbird feeders': 'hummingbird feeders',
'diverter handels': 'diverter handles',
'dynamic desighn planters': 'dynamic design planters',
'pri meld flush bi fold doors': 'primed flush bifold doors',
'fisher and penkel': 'fisher and paykel',
'price of 1 gal beher marquee paint': 'price of 1 gal behr marquee paint',
'makersbot': 'makerbot',
'shelter logic sun sahde': 'shelterlogic sun shade',
'moen 4 port pex vavle': 'moen 4 port pex valve',
'ceiling fan extension wre': 'ceiling fan extension wire',
'single knobreplacement for shower kohler': 'single knob replacement for shower kohler',
'high gloss waterborne acrylic enamal': 'high gloss waterborne acrylic enamel',
'cattale': 'cattle',
'double deountable': 'double demountable',
'fantsastic': 'fantastic',
'milwaulkee battery charger': 'milwaukee battery charger',
'tandom 30 20': 'tandem 30 20',
'schluter kurdie': 'schluter kerdi',
'square buckes': 'square buckets',
'pro series vinal post': 'pro series vinyl post',
'krud cutter rust': 'krud kutter rust',
'warm espresso distresed': 'warm espresso distressed',
'levinton phone tv combo': 'leviton phone tv combo',
'makita planner knives': 'makita planer knives',
'barictric walk in tubs': 'bariatric walk in tubs',
'woper blades': 'wiper blades',
'kidcraft 18 doll furniture': 'kidkraft 18 doll furniture',
'stickon shower wall tower': 'stick on shower wall tower',
'riding lawn mower accesores': 'riding lawn mower accessories',
'towel bar nickel gracier 18\'': 'towel bar nickel glacier 18\'',
'compreshion repair kit': 'compression repair kit',
'huskie air compressors accessories': 'husky air compressors accessories',
'36 inch neo angle glass doooors': '36 inch neo angle glass doors',
'gerber cohort fine edg knife': 'gerber cohort fine edge knife',
'work force prpane heatr': 'workforce propane heater',
'progress lighting nottingdon': 'progress lighting nottington',
'dog leash atachments': 'dog leash attachments',
'elaphent ear': 'elephant ear',
'veeneer wood tape': 'veneer wood tape',
'siccsers': 'scissors',
'klien folding 6ft ruler': 'klein folding 6ft ruler',
'wall socket covedrs': 'wall socket covers',
'klein 8 inch plies': 'klein 8 inch pliers',
'screen doors: screen tight doors 32 in. unfinished wood t-ba': 'screen doors: screen tight doors 32 in. unfinished wood t-bar',
'g e dishwaaher': 'g e dishwasher',
'white semigloass': 'white semi gloss',
'shop swiming pools': 'shop swimming pools',
'rectangular baulaster': 'rectangular baluster',
'cedar 0roofing shingles': 'cedar roofing shingles',
'prehung door fanlite': 'prehung door fan lite',
'martha suart carpet tobacco leaf': 'martha stewart carpet tobacco leaf',
'furnance gas upflow': 'furnace gas upflow',
'spalted m aple': 'spalted maple',
'crimpling pleirs': 'crimping pliers',
'cold stem for glacer bay faucets': 'cold stem for glacier bay faucets',
'holegen flood light 35w': 'halogen flood light 35w',
'ridgid ipact wrench': 'rigid impact wrench',
'twin wsher dryer gas': 'twin washer dryer gas',
'Diamond HArd Acrylic Enamal': 'Diamond HArd Acrylic Enamel',
'stainless steel wall pannels': 'stainless steel wall panels',
'perenial bulb': 'perennial bulb',
'caroilne avenue 36 in single vanity in white marble top in l': 'caroline avenue 36 in single vanity in white marble top in l',
'broadway collectionchrome vanity fixture': 'broadway collection chrome vanity fixture',
'vogoro flower': 'vigoro flower',
'guarge parnel': 'gauge panel',
'sweeep pan': 'sweep pan',
'dewalt magnetic drive quide': 'dewalt magnetic drive guide',
'milwuakee magnetic drive guide': 'milwaukee magnetic drive guide',
'stainlss steel wire wheels': 'stainless steel wire wheels',
'deltile 3x6 ceramic blue': 'daltile 3x6 ceramic blue',
'discontinuedbrown and tan area rug': 'discontinued brown and tan area rug',
'frost protectionm': 'frost protection',
'5 tier chandalier': '5 tier chandelier',
'perry hickory laminte': 'perry hickory laminate',
'carpet chessnut': 'carpet chestnut',
'midnight blue irridecent': 'midnight blue iridescent',
'under cabinet black flourescent': 'under cabinet black fluorescent',
'concord charcole runner': 'concord charcoal runner',
'gibrallar post series cedar post': 'gibraltar post series cedar post',
'jefrrey court 3x12': 'jeffrey court 3x12',
'baking panb': 'baking pan',
'dustless ginder': 'dustless grinder',
'paw print doorbe;;': 'paw print doorbell;;',
'rustolium paint american accesnts': 'rustoleum paint american accents',
'costum key': 'custom key',
'halh circle glass shelf': 'half circle glass shelf',
'pedestial snk': 'pedestal sink',
'cordless celullar': 'cordless cellular',
'scounces wall light outside': 'sconces wall light outside',
'gas powere wood chipper': 'gas powered wood chipper',
'hampton bay brillant maple laminate': 'hampton bay brilliant maple laminate',
't8 flourescent bulbs 4 ft 2 pack': 't8 fluorescent bulbs 4 ft 2 pack',
'leminate floor alexandrea': 'laminate floor alexandria',
'reflector 50w flurecent': 'reflector 50w fluorescent',
'he xl 44 range': 'ge xl44 range',
'branch protctor paint': 'branch protector paint',
'rehargeable aa batteries for landscape lighting': 'rechargeable aa batteries for landscape lighting',
'msa safet work hat': 'msa safety work hat',
'conemporary hanging outdoor light fixture': 'contemporary hanging outdoor light fixture',
'piano door hing': 'piano door hinge',
'kohler whole houser generator': 'kohler whole house generator',
'dynasty collecion': 'dynasty collection',
'chesapeke nightstand in cherry': 'chesapeake nightstand in cherry',
'kohler glas shower door 4ft': 'kohler glass shower door 4ft',
'apartment size refreidgerator': 'apartment size refrigerator',
'centerpise': 'centerprise',
'motar for large tilw': 'mortar for large tile',
'bathroom lightning 48 inch': 'bathroom lighting 48 inch',
'panle clamp': 'panel clamp',
'roll up door fo shed': 'roll up door for shed',
'oil rubbed bronze airgap for dishwasher': 'oil rubbed bronze air gap for dishwasher',
'multi plub adapter': 'multi plug adapter',
'decorative clarance': 'decorative clarence',
'tamper resistant combo outet black': 'tamper resistant combo outlet black',
'polyurethane collors': 'polyurethane colors',
'scrool lever': 'scroll lever',
'gentec smoke detector': 'gentex smoke detector',
'kohler claxton biscuit sink': 'kohler caxton biscuit sink',
'strapping for cielings': 'strapping for ceilings',
'wall mounteddrop leaf table': 'wall mounted drop leaf table',
'chamberlain intercomm': 'chamberlain intercom',
'sumpter oask': 'sumpter oak',
'torino chandler 5 light bn': 'torino chandelier 5 light bn',
'allure red mahoghany': 'allure red mahogany',
'ge personal eletrical home security': 'ge personal electric home security',
'for rent sighn': 'for rent sign',
'coper clad aluminum': 'copper clad aluminum',
'homeywell cool moisture humidifier filters': 'honeywell cool moisture humidifier filters',
'hdc fairlawm jasper cane': 'hdc fairlawn jasper cane',
'wire fen c e': 'wire fence',
'cap screww everbilt 1/4in x2in': 'cap screw everbilt 1/4in x2in',
'metal urathane': 'metal urethane',
'blitz colth': 'blitz cloth',
'commercial accunts': 'commercial accounts',
'electic chainsaw worx': 'electric chainsaw worx',
'power toll accesories': 'power tool accessories',
'leviton - decora 3 gang midway nylon wall plate - light almo': 'leviton - decora 3 gang midway nylon wall plate - light almond',
'pond filter mediumpond filter pads': 'pond filter media pond filter pads',
'tall wine cabnet': 'tall wine cabinet',
'bulk calking': 'bulk caulking',
'insolated cooler with a strap': 'insulated cooler with a strap',
'concete placer': 'concrete placer',
'transmissin leak stopper': 'transmission leak stopper',
'toilet in buisk': 'toilet in buick',
'black wire hidder': 'black wire hider',
'braid trim ceramic title molding': 'braid trim ceramic tile molding',
'laundry tub fosets valves': 'laundry tub faucets valves',
'schlage plymoth orbit oil rubbed bronze': 'schlage plymouth orbit oil rubbed bronze',
'romanic poetry flat interior paint': 'romantic poetry flat interior paint',
'worklight 500 watt bullbs': 'worklight 500 watt bulbs',
'elvies ornament': 'elvis ornament',
'dpcam camera': 'dropcam camera',
'clorine tabs for septic': 'chlorine tabs for septic',
'interor door framed': 'interior door frame',
'hot dipped galvanized screwes': 'hot dipped galvanized screws',
'14 ft. w x29 ft. l x 14 ft.h': '14 ft. w x 29 ft. x 14 ft.h',
'water resistent top': 'water resistant top',
'galvinize 2 in box of screws': 'galvanized 2 in box of screws',
'taupe teasure carpet': 'taupe treasure carpet',
'nickle vanity lighting mosaics': 'nickel vanity lighting mosaics',
'heat circualtor': 'heat circulator',
'flexible pvc joing': 'flexible pvc joint',
'14 metal abresive blade': '14 metal abrasive blade',
'foldin g patio doors': 'folding patio doors',
'primeline mirror sliding doors': 'prime line mirror sliding doors',
'sanora maple flooring': 'sonora maple flooring',
'plastic paint containwes with lid': 'plastic paint containers with lid',
'deck fasting systems': 'deck fastening systems',
'long handled squeege window cleaning': 'long handled squeegee window cleaning',
'lsnd scape trim edger': 'landscape trim edger',
'rust oleum aged iron': 'rustoleum aged iron',
'redi ledge cooner': 'redi ledge corner',
'milwakee work radio': 'milwaukee work radio',
'progress piedmot': 'progress piedmont',
'home security camera cablee': 'home security camera cable',
'white rock daltale': 'white rock daltile',
'japenes lilacs': 'japanese lilacs',
'thickrubber mat': 'thick rubber mat',
'topdown bottom up shades': 'top down bottom up shades',
'locktite 9oz 2in1 premium sealant': 'loctite 9oz 2in1 premium sealant',
'evaporative thermstate': 'evaporative thermostat',
'red devil paint cleanaer': 'red devil paint cleaner',
'beer wine refrigeratr': 'beer wine refrigerator',
'forced air vents covrs': 'forced air vents covers',
'ew drops marquee paint': 'dew drops marquee paint',
'kitchen sink and fawcet black dual mount': 'kitchen sink and faucet black dual mount',
'dimmable fluoreecent': 'dimmable fluorescent',
'textured 6 pannel hollow core primed composite prehung inter': 'textured 6 panel hollow core primed composite prehung inter',
'dakato 4 light': 'dakota 4 light',
'playset handels': 'playset handles',
'vauhhan hammers': 'vaughan hammers',
'sterling frosted glass shower ath doors': 'sterling frosted glass shower bath doors',
'autom tic drawer lite': 'automatic drawer light',
'all trellisses': 'all trellises',
'american standard 5324.019 enlongate toilet seat': 'american standard 5324.019 elongated toilet seat',
'15 in built in maytag trash compactorr': '15 in built in maytag trash compactor',
'3 butto pico pj-3b': '3 button pico pj-3b',
'ligth': 'light',
'sissors': 'scissors'
} | mit |
untom/scikit-learn | examples/classification/plot_lda.py | 163 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
    Only one feature contains discriminative information; the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
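# For newer scikit-learn releases (0.17+), where ``sklearn.lda.LDA`` was
# replaced by ``sklearn.discriminant_analysis.LinearDiscriminantAnalysis``,
# a minimal sketch of the equivalent estimator calls would be:
#
#   from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
#   clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
#   clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)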
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/linear_model/plot_sparse_logistic_regression_20newsgroups.py | 52 | 4172 | """
=====================================================
Multiclass sparse logistic regression on newsgroups20
=====================================================
Comparison of multinomial logistic L1 vs one-versus-rest L1 logistic regression
to classify documents from the newsgroups20 dataset. Multinomial logistic
regression yields more accurate results and is faster to train on the larger
scale dataset.
Here we use l1-induced sparsity, which trims the weights of uninformative
features to zero. This is good if the goal is to extract the strongly
discriminative vocabulary of each class. If the goal is to get the best
predictive accuracy, it is better to use the non-sparsity-inducing l2 penalty
instead.
A more traditional (and possibly better) way to predict on a sparse subset of
input features would be to use univariate feature selection followed by a
traditional (l2-penalised) logistic regression model.
"""
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
print(__doc__)
# Author: Arthur Mensch
t0 = time.clock()
# We use SAGA solver
solver = 'saga'
# Turn down for faster run time
n_samples = 10000
# fetch_20newsgroups_vectorized caches the data locally for faster access
dataset = fetch_20newsgroups_vectorized('all')
X = dataset.data
y = dataset.target
X = X[:n_samples]
y = y[:n_samples]
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42,
stratify=y,
test_size=0.1)
train_samples, n_features = X_train.shape
n_classes = np.unique(y).shape[0]
print('Dataset 20newsgroup, train_samples=%i, n_features=%i, n_classes=%i'
% (train_samples, n_features, n_classes))
models = {'ovr': {'name': 'One versus Rest', 'iters': [1, 3]},
'multinomial': {'name': 'Multinomial', 'iters': [1, 3, 7]}}
for model in models:
    # Add initial chance-level values for plotting purposes
accuracies = [1 / n_classes]
times = [0]
densities = [1]
model_params = models[model]
# Small number of epochs for fast runtime
for this_max_iter in model_params['iters']:
print('[model=%s, solver=%s] Number of epochs: %s' %
(model_params['name'], solver, this_max_iter))
lr = LogisticRegression(solver=solver,
multi_class=model,
C=1,
penalty='l1',
fit_intercept=True,
max_iter=this_max_iter,
random_state=42,
)
t1 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t1
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
density = np.mean(lr.coef_ != 0, axis=1) * 100
accuracies.append(accuracy)
densities.append(density)
times.append(train_time)
models[model]['times'] = times
models[model]['densities'] = densities
models[model]['accuracies'] = accuracies
print('Test accuracy for model %s: %.4f' % (model, accuracies[-1]))
print('%% non-zero coefficients for model %s, '
'per class:\n %s' % (model, densities[-1]))
print('Run time (%i epochs) for model %s:'
'%.2f' % (model_params['iters'][-1], model, times[-1]))
fig = plt.figure()
ax = fig.add_subplot(111)
for model in models:
name = models[model]['name']
times = models[model]['times']
accuracies = models[model]['accuracies']
ax.plot(times, accuracies, marker='o',
label='Model: %s' % name)
ax.set_xlabel('Train time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
fig.suptitle('Multinomial vs One-vs-Rest Logistic L1\n'
'Dataset %s' % '20newsgroups')
fig.tight_layout()
fig.subplots_adjust(top=0.85)
run_time = time.clock() - t0
print('Example run in %.3f s' % run_time)
plt.show()
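# The docstring above mentions univariate feature selection followed by an
# l2-penalised logistic regression as a more traditional sparse approach.
# A minimal sketch of that idea (the chi2 score function and k=1000 kept
# features are illustrative assumptions, not tuned values):
#
#   from sklearn.feature_selection import SelectKBest, chi2
#   from sklearn.pipeline import make_pipeline
#   selected_lr = make_pipeline(
#       SelectKBest(chi2, k=1000),
#       LogisticRegression(C=1, penalty='l2', solver=solver, max_iter=7))
#   selected_lr.fit(X_train, y_train)
#   print('Feature selection + l2 accuracy: %.4f'
#         % selected_lr.score(X_test, y_test))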
| bsd-3-clause |
cac2003/incubator-singa | tool/python/examples/mnist_rbm4.py | 3 | 1708 | #!/usr/bin/env python
#/************************************************************
#*
#* Licensed to the Apache Software Foundation (ASF) under one
#* or more contributor license agreements. See the NOTICE file
#* distributed with this work for additional information
#* regarding copyright ownership. The ASF licenses this file
#* to you under the Apache License, Version 2.0 (the
#* "License"); you may not use this file except in compliance
#* with the License. You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing,
#* software distributed under the License is distributed on an
#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#* KIND, either express or implied. See the License for the
#* specific language governing permissions and limitations
#* under the License.
#*
#*************************************************************/
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
from singa.model import *
from examples.datasets import mnist
rbmid = 4
pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
X_train, X_test, workspace = mnist.load_data(
workspace = 'examples/rbm/rbm'+str(rbmid),
nb_rbm = rbmid,
checkpoint_steps = 6000,
**pvalues)
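# Energy-based model for RBM pretraining stage 4: hidden-layer sizes taken
# from out_dim below, Gaussian sampling, trained with SGD and contrastive
# divergence ('cd').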
m = Energy('rbm'+str(rbmid), sys.argv)
out_dim = [1000, 500, 250, 30]
m.add(RBM(out_dim, sampling='gaussian', w_std=0.1, b_wd=0))
sgd = SGD(lr=0.001, decay=0.0002, momentum=0.8)
topo = Cluster(workspace)
m.compile(optimizer=sgd, cluster=topo)
m.fit(X_train, alg='cd', nb_epoch=6000)
#result = m.evaluate(X_test, test_steps=100, test_freq=500)
| apache-2.0 |
dipapaspyros/bdo_platform | bdo_main_app/urls.py | 1 | 1445 | from django.conf.urls import url
import bdo_main_app.views as views
import service_builder.views as sb_views
urlpatterns = [
# home & signup
url('^$', views.home, name='home'),
url('^bdo/$', views.dataset_search, name='bdo'),
url('^search/$', views.search, name='search'),
url('^exploretools/$', views.exploretools, name='exploretools'),
url('^analytics-environment/$', views.search, name='search'),
# datasets
url('^datasets/(?P<dataset_id>[\w-]+)/$', views.dataset, name='dataset-details'),
# on demand
# url('^on-demand/$', views.on_demand_search, name='on-demand'),
# url('^on-demand/create/$', views.on_demand_create, name='on-demand-create'),
# service and dashboards
url('^services/$', views.services, name='services'), #Service Marketplace
url('^services/dashboard/(?P<pk>\d+)/$', views.view_dashboard, name='view_dashboard'),
url('^services/service/(?P<pk>\d+)/$', sb_views.load_service, name='load_service'),
url('^pilot/wave-energy/$', views.load_nester_service, name='wave-energy-pilot'),
url('^pilot/anomaly-detection/$', views.load_xmile_service, name='wave-energy-pilot'),
url('^pilot/oil-spill-simulation/$', views.load_hcmr_service, name='wave-energy-pilot'),
url('^pilot/fault-prediction-anek/$', views.load_anek_service, name='wave-energy-pilot'),
url('^pilot/fault-prediction-fnk/$', views.load_fnk_service, name='wave-energy-pilot'),
]
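# Illustration only (assumes Django >= 1.10, where ``django.urls.reverse`` is
# available; older projects would import it from ``django.core.urlresolvers``).
# The helper below is not wired into the application; it just sketches how the
# named routes declared above can be resolved programmatically.
def _example_reverse_dataset_url(dataset_id='abc-123'):
    from django.urls import reverse
    # e.g. returns '/datasets/abc-123/' for the 'dataset-details' route
    return reverse('dataset-details', kwargs={'dataset_id': dataset_id})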
| mit |
nmayorov/scikit-learn | sklearn/cluster/tests/test_birch.py | 339 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
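def _birch_usage_sketch():
    # Minimal usage sketch (not one of the test cases above; an illustrative
    # assumption only): Birch first builds a CF-tree constrained by
    # ``threshold`` and ``branching_factor``, then the global step groups the
    # resulting subclusters into ``n_clusters`` final clusters.
    X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
    brc = Birch(threshold=0.5, branching_factor=50, n_clusters=3).fit(X)
    assert len(np.unique(brc.labels_)) == 3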
| bsd-3-clause |
DSLituiev/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 16 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
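# Toy illustration (an added assumption, not used by the pipeline below): the
# same transitive-closure idea applied to a small in-memory dict resolves
# chained redirects A -> B -> C -> D down to their final target.
_toy_redirects = {"A": "B", "B": "C", "C": "D"}
for _source in list(_toy_redirects):
    _seen, _target = {_source}, _toy_redirects[_source]
    while _target in _toy_redirects and _target not in _seen:
        _seen.add(_target)
        _target = _toy_redirects[_target]
    _toy_redirects[_source] = _target
assert _toy_redirects == {"A": "D", "B": "D", "C": "D"}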
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
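# Toy illustration (an added assumption, not part of the pipeline): duplicate
# links collapse to a single entry in the adjacency matrix, so two articles
# linking to each other yield exactly two nonzeros after CSR conversion.
_toy_adjacency = sparse.lil_matrix((2, 2), dtype=np.float32)
for _i, _j in [(0, 1), (1, 0), (0, 1)]:
    _toy_adjacency[_i, _j] = 1.0
assert _toy_adjacency.tocsr().nnz == 2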
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
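# Sanity check (an added illustration, not part of the original example): on a
# tiny 3-node cycle every vertex has one incoming and one outgoing link, so the
# power iteration should return (almost) identical centrality scores.
_cycle = sparse.csr_matrix(np.array([[0, 1, 0],
                                     [0, 0, 1],
                                     [1, 0, 0]], dtype=np.float32))
_cycle_scores = centrality_scores(_cycle, max_iter=50, tol=1e-10)
assert np.allclose(_cycle_scores, _cycle_scores[0])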
| bsd-3-clause |
rajat1994/scikit-learn | examples/manifold/plot_compare_methods.py | 258 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)  # 10th panel; the 3-digit shorthand cannot express index 10
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/metrics/tests/test_ranking.py | 31 | 41905 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
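# Worked example (an added illustration): with y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], three of the four (positive, negative)
# pairs are ranked correctly, so the reference AUC is 3 / 4 = 0.75.
assert _auc(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8])) == 0.75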
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
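# Worked example (an added illustration): sorting y_score = [0.1, 0.4, 0.35, 0.8]
# for y_true = [0, 0, 1, 1] ranks the labels as [1, 0, 1, 0]; the precisions at
# the two relevant documents are 1/1 and 2/3, so the reference score is 5/6.
assert abs(_average_precision(np.array([0, 0, 1, 1]),
                              np.array([0.1, 0.4, 0.35, 0.8])) - 5. / 6) < 1e-9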
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for growing number of consecutive relevant
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various problem sizes
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for growing number of consecutive relevant label
for n_relevant in range(1, n_labels):
# Check for a bunch of position
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a better (smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
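# Worked example (an added illustration): for y_true = [[1, 0, 1]] and
# y_score = [[0.25, 0.5, 0.75]] the relevant labels get ranks 3 and 1, with
# precisions 2/3 and 1/1, so the label ranking average precision is
# (2/3 + 1) / 2 = 5/6.
assert abs(_my_lrap(np.array([[1, 0, 1]]),
                    np.array([[0.25, 0.5, 0.75]])) - 5. / 6) < 1e-9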
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
466152112/scikit-learn | sklearn/metrics/metrics.py | 232 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
466152112/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 385 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluation of the real function and the predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval built from
# the upper and lower quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
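# Optional follow-up (an added assumption, not in the original example): refit
# the two quantile models and measure how many noisy training observations fall
# inside the fitted interval; the empirical coverage should be roughly 90%.
clf.set_params(loss='quantile', alpha=alpha)
clf.fit(X, y)
y_upper_train = clf.predict(X)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
y_lower_train = clf.predict(X)
coverage = np.mean((y >= y_lower_train) & (y <= y_upper_train))
print("Empirical coverage of the 90%% interval on training data: %.2f"
      % coverage)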
| bsd-3-clause |
466152112/scikit-learn | sklearn/svm/setup.py | 318 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/svm/setup.py | 318 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
466152112/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 257 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
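# With `warm_start=True`, each call to `fit` after `set_params(n_estimators=i)`
# adds trees to the existing forest instead of refitting from scratch, which is
# what lets the loop below trace the OOB error as the ensemble grows.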
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
schets/scikit-learn | sklearn/semi_supervised/label_propagation.py | 24 | 15181 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all the given points and
solving for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but they can be expensive to
run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
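        # For the 'rbf' kernel, _get_kernel returns a dense kernel matrix of
        # shape (n_train, n_test); for the 'knn' kernel it returns, for each
        # test sample, the indices of its nearest training samples, hence the
        # two branches below.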
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
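        # After this loop every labeled sample holds a one-hot row over the
        # known classes, while rows of unlabeled samples (y == -1) stay zero.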
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
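        # The result is the symmetrically normalized affinity
        # D^-1/2 W D^-1/2 with its diagonal set to zero, as used by
        # Zhou et al. (2004).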
return laplacian
| bsd-3-clause |
h2oai/h2o | py/testdir_single_jvm/test_parse_rand_width_fvec.py | 9 | 9480 | import unittest, re, sys, random, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_browse as h2b
print "Same as test_parse_many_cases.py but randomize the number of tokens in each line"
print "parser should do not crash?"
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
# h2b.browseTheCloud()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_A_many_parse1(self):
rows = self.genrows1()
set = 1
self.tryThemAll(set,rows)
def test_B_many_parse2(self):
rows = self.genrows2()
set = 2
self.tryThemAll(set,rows)
# this one has problems with blank lines
def test_C_many_parse3(self):
rows = self.genrows3()
set = 3
self.tryThemAll(set,rows)
def genrows1(self):
# comment has to have # in first column? (no leading whitespace)
# FIX! what about blank fields and spaces as sep
# FIX! temporary need more lines to avoid sample error in H2O
# throw in some variants for leading 0 on the decimal, and scientific notation
# new: change the @ to an alternate legal SEP if the special HIVE SEP is in play
rows = [
"@FirstName@|@Middle@Initials@|@LastName@|@Date@of@Birth@ ",
"0|0.5|1|0",
"3|NaN|4|1",
"6||8|0",
"0.6|0.7|0.8|1",
"+0.6|+0.7|+0.8|0",
"-0.6|-0.7|-0.8|1",
".6|.7|.8|0",
"+.6|+.7|+.8|1",
"-.6|-.7|-.8|0",
"+0.6e0|+0.7e0|+0.8e0|1",
"-0.6e0|-0.7e0|-0.8e0|0",
".6e0|.7e0|.8e0|1",
"+.6e0|+.7e0|+.8e0|0",
"-.6e0|-.7e0|-.8e0|1",
"+0.6e00|+0.7e00|+0.8e00|0",
"-0.6e00|-0.7e00|-0.8e00|1",
".6e00|.7e00|.8e00|0",
"+.6e00|+.7e00|+.8e00|1",
"-.6e00|-.7e00|-.8e00|0",
"+0.6e-01|+0.7e-01|+0.8e-01|1",
"-0.6e-01|-0.7e-01|-0.8e-01|0",
".6e-01|.7e-01|.8e-01|1",
"+.6e-01|+.7e-01|+.8e-01|0",
"-.6e-01|-.7e-01|-.8e-01|1",
"+0.6e+01|+0.7e+01|+0.8e+01|0",
"-0.6e+01|-0.7e+01|-0.8e+01|1",
".6e+01|.7e+01|.8e+01|0",
"+.6e+01|+.7e+01|+.8e+01|1",
"-.6e+01|-.7e+01|-.8e+01|0",
"+0.6e102|+0.7e102|+0.8e102|1",
"-0.6e102|-0.7e102|-0.8e102|0",
".6e102|.7e102|.8e102|1",
"+.6e102|+.7e102|+.8e102|0",
"-.6e102|-.7e102|-.8e102|1",
]
# evilness to emulate broken files.
# just randomly chop the string before we template-resolve it
newRows = []
for r in rows:
newLength = random.randint(1,len(r))
# add random 0 or 1 to the start, and make RF use the first col
# so rf doesn't complain about the response
# be nice, and guarantee there's an input col for RF no matter what
newRows.append(random.choice(["0","1"]) + "|" + random.choice(["0","1"]) + "|" +
r[:newLength])
return newRows
# "# comment here is okay",
# "# comment here is okay too",
# FIX! needed an extra line to avoid bug on default 67+ sample?
def genrows2(self):
rows = [
"First@Name|@MiddleInitials|LastName@|Date@ofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"Gwendolyn|B.|Burton|1947-10-26",
"Elodia|G.|Ali|1983-10-31",
"Elo@dia|@G.|Ali@|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31"
]
# evilness to emulate broken files.
# just randomly chop the string before we template-resolve it
newRows = []
for r in rows:
newLength = random.randint(1,len(r))
# add random 0 or 1 to the start, and make RF use the first col
# so rf doesn't complain about the response
# be nice, and guarantee there's an input col for RF no matter what
newRows.append(random.choice(["0","1"]) + "|" + random.choice(["0","1"]) + "|" +
r[:newLength])
return newRows
# update spec
# intermixing blank lines in the first two lines breaks things
# blank lines cause all columns except the first to get NA (red)
# first may get a blank string? (not ignored)
def genrows3(self):
rows = [
"FirstName|MiddleInitials|LastName|DateofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"",
"Gwendolyn||Burton|1947-10-26",
"",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
]
# evilness to emulate broken files.
# just randomly chop the string before we template-resolve it
newRows = []
for r in rows:
# this template set has empty lines
if r=="":
newRows.append(r)
else:
newLength = random.randint(1,len(r))
# add random 0 or 1 to the start, and make RF use the first col
# so rf doesn't complain about the response
newRows.append(random.choice(["0","1"]) + "|" + random.choice(["0","1"]) + "|" +
r[:newLength])
return newRows
# The 3 supported line-ends
# FIX! should test them within quoted tokens
eolDict = {
0:"\n",
1:"\r\n",
2:"\r"
}
# tab here will cause problems too?
# 5:['"\t','\t"'],
# 8:["'\t","\t'"]
tokenChangeDict = {
0:['',''],
1:['\t','\t'],
2:[' ',' '],
3:['"','"'],
4:['" ',' "'],
5:["'","'"],
6:["' "," '"],
}
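    # tryThemAll (below) writes every template set out once for each
    # combination of line-end, token-quoting and separator case, producing
    # files named parsetmp_<set>_<eol>_<token>_<sep>.data.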
def changeTokens(self,rows,tokenCase):
[cOpen,cClose] = self.tokenChangeDict[tokenCase]
newRows = []
for r in rows:
# don't quote lines that start with #
# can quote lines start with some spaces or tabs? maybe
comment = re.match(r'^[ \t]*#', r)
empty = re.match(r'^$',r)
if not (comment or empty):
r = re.sub('^',cOpen,r)
r = re.sub('\|',cClose + '|' + cOpen,r)
r = re.sub('$',cClose,r)
h2o.verboseprint(r)
newRows.append(r)
return newRows
def writeRows(self,csvPathname,rows,eol):
f = open(csvPathname, 'w')
for r in rows:
f.write(r + eol)
        f.close()
        # what about the case of a missing eol at the end of the file?
sepChangeDict = {
# NEW: 0x01 can be SEP character for Hive datasets
0:"",
1:",",
2:" ",
3:"\t",
}
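    # When the Hive \001 separator is picked, changeSep also replaces the '@'
    # characters in the templates with one of the other separators, per the
    # note in genrows1.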
def changeSep(self,rows,sepCase):
# do a trial replace, to see if we get a <tab><sp> problem
# comments at the beginning..get a good row
r = rows[-1]
        tabseptab = re.search(r'\t\|\t', r)
        spsepsp = re.search(r' \| ', r)
if tabseptab or spsepsp:
# use comma instead. always works
# print "Avoided"
newSep = ","
else:
newSep = self.sepChangeDict[sepCase]
newRows = [r.replace('|',newSep) for r in rows]
# special case, if using the HIVE sep, substitute randomly
# one of the other SEPs into the "@" in the template
# FIX! we need to add HIVE lineends into lineend choices.
# assuming that lineend
if newSep == "":
# don't use the same SEP to swap in.
randomOtherSep = random.choice(self.sepChangeDict.values())
while (randomOtherSep==newSep):
randomOtherSep = random.choice(self.sepChangeDict.values())
newRows = [r.replace('@',randomOtherSep) for r in newRows]
return (newSep, newRows)
def tryThemAll(self,set,rows):
for eolCase in range(len(self.eolDict)):
eol = self.eolDict[eolCase]
# change tokens must be first
for tokenCase in range(len(self.tokenChangeDict)):
newRows1 = self.changeTokens(rows,tokenCase)
for sepCase in range(len(self.sepChangeDict)):
(newSep, newRows2) = self.changeSep(newRows1,sepCase)
csvPathname = SYNDATASETS_DIR + '/parsetmp_' + \
str(set) + "_" + \
str(eolCase) + "_" + \
str(tokenCase) + "_" + \
str(sepCase) + \
'.data'
self.writeRows(csvPathname,newRows2,eol)
# give h2o the separator, to be nice. (integerized)
parseResult = h2i.import_parse(path=csvPathname, schema='put', separator=ord(newSep),
noPrint=not h2o.verbose)
# h2o_cmd.runRF(parseResult=parseResult, trees=1, response='C1', timeoutSecs=10, retryDelaySecs=0.1, noPrint=True)
h2o.verboseprint("Set", set)
h2o.check_sandbox_for_errors()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
rhiever/sklearn-benchmarks | model_code/random_search_preprocessing/SGDClassifier.py | 1 | 3065 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import Binarizer, MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import Normalizer, PolynomialFeatures, RobustScaler, StandardScaler
from sklearn.decomposition import FastICA, PCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import SelectFwe, SelectPercentile, VarianceThreshold
from sklearn.feature_selection import SelectFromModel, RFE
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import SGDClassifier
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
preprocessor_num = int(sys.argv[4])
np.random.seed(random_seed)
preprocessor_list = [Binarizer, MaxAbsScaler, MinMaxScaler, Normalizer,
PolynomialFeatures, RobustScaler, StandardScaler,
FastICA, PCA, RBFSampler, Nystroem, FeatureAgglomeration,
SelectFwe, SelectPercentile, VarianceThreshold,
SelectFromModel, RFE]
chosen_preprocessor = preprocessor_list[preprocessor_num]
pipeline_components = [chosen_preprocessor, SGDClassifier]
pipeline_parameters = {}
loss_values = np.random.choice(['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'], size=num_param_combinations)
penalty_values = np.random.choice(['l2', 'l1', 'elasticnet'], size=num_param_combinations)
alpha_values = np.random.exponential(scale=0.01, size=num_param_combinations)
learning_rate_values = np.random.choice(['constant', 'optimal', 'invscaling'], size=num_param_combinations)
fit_intercept_values = np.random.choice([True, False], size=num_param_combinations)
l1_ratio_values = np.random.uniform(low=0., high=1., size=num_param_combinations)
eta0_values = np.random.uniform(low=0., high=5., size=num_param_combinations)
power_t_values = np.random.uniform(low=0., high=5., size=num_param_combinations)
all_param_combinations = zip(loss_values, penalty_values, alpha_values, learning_rate_values, fit_intercept_values, l1_ratio_values, eta0_values, power_t_values)
pipeline_parameters[SGDClassifier] = \
[{'loss': loss, 'penalty': penalty, 'alpha': alpha, 'learning_rate': learning_rate, 'fit_intercept': fit_intercept,
'l1_ratio': 0.15 if penalty != 'elasticnet' else l1_ratio, 'eta0': 0. if learning_rate not in ['constant', 'invscaling'] else eta0,
'power_t': 0.5 if learning_rate != 'invscaling' else power_t, 'random_state': 324089}
for (loss, penalty, alpha, learning_rate, fit_intercept, l1_ratio, eta0, power_t) in all_param_combinations]
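# l1_ratio, eta0 and power_t only influence SGDClassifier for particular
# penalty / learning_rate settings, so the random draws above are used only
# where they matter and are otherwise pinned to scikit-learn's defaults
# (0.15, 0.0 and 0.5).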
if chosen_preprocessor is SelectFromModel:
pipeline_parameters[SelectFromModel] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
elif chosen_preprocessor is RFE:
pipeline_parameters[RFE] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| mit |
zhouguangfu/FreeROI | froi/algorithm/unused/neighbormapper.py | 6 | 2124 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Mapper for getting spatial neighbors of voxels.
Author:
Date: 2012.05.26
Editors: [plz add own name here after edit]
"""
__docformat__ = 'restructuredtext'
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.sparse import issparse
from mvpa2.mappers.base import Mapper
from mvpa2.datasets.base import Dataset
from neighbor import *
from bpmri import *
import pdb
class NeighborMapper(Mapper):
"""Mapper to get spatial neighbors.
"""
def __init__(self,neighbor_shape, outsparse=True, **kwargs):
"""
Parameters
----------
        neighbor_shape : neighbor_shape
          Specification of the spatial neighborhood (dimensionality, size and
          shape type).
outsparse: bool
whether to output sparse matrix.
"""
Mapper.__init__(self, **kwargs)
self.__outsparse = outsparse
self.__neighbor_shape = neighbor_shape
def _forward_dataset(self, ds):
out_ds = Dataset([])
out_ds.a = ds.a
pdb.set_trace()
iv = np.nonzero(ds.samples)[0]
coords = ds.sa.values()[0][iv]
out_ds.fa = coords
dim = ds.a.voxel_dim
nbdim = self.__neighbor_shape.nbdim
nbsize = self.__neighbor_shape.nbsize
shape_type = self.__neighbor_shape.shape_type
volnb = volneighbors(coords, dim, nbdim, nbsize, shape_type)
        distmask = volnb.compute_offsets()
if self.__outsparse == True:
out_ds.samples = distmask
elif self.__outsparse == False:
distmask = distmask.todense()
out_ds.samples = distmask
else:
            raise RuntimeError('outsparse should be True or False.')
return out_ds
if __name__ == "__main__":
pdb.set_trace()
    targnii = 'prob-face-object.nii.gz'
nb_shape = neighbor_shape(3,26,'fast_cube')
map1 = NeighborMapper(nb_shape)
ds = bpfmri_dataset(targnii)
nb_mat = map1._forward_dataset(ds)
| bsd-3-clause |
rflamary/POT | examples/plot_OT_1D.py | 2 | 1698 | # -*- coding: utf-8 -*-
"""
====================
1D optimal transport
====================
This example illustrates the computation of EMD and Sinkhorn transport plans
and their visualization.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
from ot.datasets import make_1D_gauss as gauss
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
b = gauss(n, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
##############################################################################
# Plot distributions and loss matrix
# ----------------------------------
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
pl.plot(x, a, 'b', label='Source distribution')
pl.plot(x, b, 'r', label='Target distribution')
pl.legend()
#%% plot distributions and loss matrix
pl.figure(2, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')
##############################################################################
# Solve EMD
# ---------
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')
##############################################################################
# Solve Sinkhorn
# --------------
#%% Sinkhorn
lambd = 1e-3
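# lambd is the weight of the entropic regularization term passed to
# ot.sinkhorn; smaller values give a plan closer to the exact EMD solution but
# need more iterations to converge.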
Gs = ot.sinkhorn(a, b, M, lambd, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gs, 'OT matrix Sinkhorn')
pl.show()
| mit |
schets/scikit-learn | examples/svm/plot_svm_anova.py | 249 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine to form a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
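# The 'anova__percentile' name below follows scikit-learn's
# <step>__<parameter> convention for addressing a parameter of the
# SelectPercentile step inside the pipeline.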
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |