repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes)
---|---|---|---|---|---
billy-inn/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with very few labeled points.
The handwritten digit dataset has 1797 total points. The model will
be trained on 330 of these points, but only 30 of them will be labeled.
Results in the form of a confusion matrix and a series of metrics over
each class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
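# Only the first 30 shuffled samples keep their labels; the remaining 300
# are treated as unlabeled during training.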
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# copy the targets and mark the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
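# gamma is the width of the RBF kernel used to build the similarity graph
# between samples; max_iter is kept small so the example runs quickly.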
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
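# label_distributions_ holds one probability distribution over the classes
# per sample; transposing it makes scipy compute one entropy value per sample.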
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
0asa/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, which will be fed with batches of examples. To guarantee
that the feature space remains the same over time, we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
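        # Feed the raw file chunk by chunk and yield the documents completed
        # so far after each chunk, so the whole corpus never sits in memory.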
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
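# The hashing trick needs no fitted vocabulary: any token, including words
# first seen in a later batch, is mapped into the same fixed 2 ** 18 feature
# space. non_negative=True keeps the values >= 0 so MultinomialNB can be used.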
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
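    # keep yielding batches until the stream is exhausted, at which point
    # get_minibatch returns an empty batch and the loop below terminates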
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Build the iterator that yields mini-batches of parsed documents from the
# Reuters stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
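        # `classes` must list every possible label up front, because an
        # individual mini-batch is not guaranteed to contain both classes.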
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
hainm/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 110 | 34127 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = '\u0627'  # plain alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = ''  # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
    # test for ValueError on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every parameter combination converges to a 100%
    # accuracy model, so the grid search simply keeps one of the tied
    # configurations as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every parameter combination converges to a 100%
    # accuracy model, so the grid search simply keeps one of the tied
    # configurations as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # tests that the count vectorizer works with Cyrillic text.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
pv/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
luo66/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
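# random_state=42 makes the randomized estimators (Theil-Sen's subpopulation
# sampling and RANSAC's random sampling) reproducible across runs.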
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
rsignell-usgs/notebook | iris_snippets_debug.py | 1 | 6841 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%matplotlib inline
import time
import contextlib
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import matplotlib.tri as tri
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
import iris
from iris.unit import Unit
from iris.exceptions import CoordinateNotFoundError
import cartopy.crs as ccrs
from cartopy.feature import NaturalEarthFeature, COLORS
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
LAND = NaturalEarthFeature('physical', 'land', '10m', edgecolor='face',
facecolor=COLORS['land'])
iris.FUTURE.netcdf_promote = True
iris.FUTURE.cell_datetime_objects = True # <- TODO!
def time_coord(cube):
"""Return the variable attached to time axis and rename it to time."""
try:
cube.coord(axis='T').rename('time')
except CoordinateNotFoundError:
pass
timevar = cube.coord('time')
return timevar
def z_coord(cube):
"""Heuristic way to return the dimensionless vertical coordinate."""
try:
z = cube.coord(axis='Z')
except CoordinateNotFoundError:
z = cube.coords(axis='Z')
for coord in cube.coords(axis='Z'):
if coord.ndim == 1:
z = coord
return z
def time_near(cube, datetime):
"""Return the nearest index to a `datetime`."""
timevar = time_coord(cube)
try:
time = timevar.units.date2num(datetime)
idx = timevar.nearest_neighbour_index(time)
except IndexError:
idx = -1
return idx
def time_slice(cube, start, stop=None):
"""TODO: Re-write to use `iris.FUTURE.cell_datetime_objects`."""
istart = time_near(cube, start)
if stop:
istop = time_near(cube, stop)
if istart == istop:
raise ValueError('istart must be different from istop!'
'Got istart {!r} and '
' istop {!r}'.format(istart, istop))
return cube[istart:istop, ...]
else:
return cube[istart, ...]
def plot_surface(cube, model='', unstructure=False, **kw):
projection = kw.pop('projection', ccrs.PlateCarree())
figsize = kw.pop('figsize', (8, 6))
cmap = kw.pop('cmap', plt.cm.rainbow)
fig, ax = plt.subplots(figsize=figsize,
subplot_kw=dict(projection=projection))
ax.set_extent(get_bbox(cube))
ax.add_feature(LAND)
ax.coastlines(resolution='10m')
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
z = z_coord(cube)
if z:
positive = z.attributes.get('positive', None)
if positive == 'up':
idx = np.unique(z.points.argmax(axis=0))[0]
else:
idx = np.unique(z.points.argmin(axis=0))[0]
c = cube[idx, ...].copy()
else:
idx = None
c = cube.copy()
c.data = ma.masked_invalid(c.data)
t = time_coord(cube)
t = t.units.num2date(t.points)[0]
if unstructure:
        # The following lines would work if the cube is not bbox-sliced.
# lon = cube.mesh.nodes[:, 0]
# lat = cube.mesh.nodes[:, 1]
# nv = cube.mesh.faces
lon = cube.coord(axis='X').points
lat = cube.coord(axis='Y').points
nv = Delaunay(np.c_[lon, lat]).vertices
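        # The native mesh connectivity is unavailable here (see the note
        # above), so a Delaunay triangulation of the node coordinates is used
        # as a stand-in.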
triang = tri.Triangulation(lon, lat, triangles=nv)
        # http://matplotlib.org/examples/pylab_examples/tricontour_smooth_delaunay.html
if False: # TODO: Test this.
subdiv = 3
min_circle_ratio = 0.01
mask = tri.TriAnalyzer(triang).get_flat_tri_mask(min_circle_ratio)
triang.set_mask(mask)
refiner = tri.UniformTriRefiner(triang)
tri_ref, data_ref = refiner.refine_field(cube.data, subdiv=subdiv)
cs = ax.tricontourf(triang, c.data, cmap=cmap, **kw)
else:
cs = ax.pcolormesh(c.coord(axis='X').points,
c.coord(axis='Y').points,
c.data, cmap=cmap, **kw)
title = (model, t, c.name(), idx)
ax.set_title('{}: {}\nVariable: {} level: {}'.format(*title))
return fig, ax, cs
def get_bbox(cube):
xmin = cube.coord(axis='X').points.min()
xmax = cube.coord(axis='X').points.max()
ymin = cube.coord(axis='Y').points.min()
ymax = cube.coord(axis='Y').points.max()
return [xmin, xmax, ymin, ymax]
@contextlib.contextmanager
def timeit(log=None):
t = time.time()
yield
elapsed = time.strftime("%H:%M:%S", time.gmtime(time.time()-t))
if log:
log.info(elapsed)
else:
print(elapsed)
# <codecell>
model = 'NECOFS_FVCOM'
start = datetime.utcnow() - timedelta(days=7)
bbox = [-70.8, 41.4, -69.9, 42.3]
units = Unit('Kelvin')
# <markdowncell>
# #### No horizontal subset works fine.
# <codecell>
with timeit():
url = "http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/"
url += "Forecasts/NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST.nc"
cube = iris.load_cube(url, 'sea_water_potential_temperature')
cube = time_slice(cube, start, None)
cube.convert_units(units)
print(cube)
fig, ax, cs = plot_surface(cube, model, unstructure=True)
cbar = fig.colorbar(cs, extend='both', shrink=0.75)
t = cbar.ax.set_title(cube.units)
# <markdowncell>
# #### If forcing the `X` and `Y` the subset works.
# <codecell>
with timeit():
url = "http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/"
url += "Forecasts/NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST.nc"
cube = iris.load_cube(url, 'sea_water_potential_temperature')
cube = time_slice(cube, start, None)
cube.convert_units(units)
print(cube.coord(axis='Y'))
print(cube.coord(axis='X'))
print(cube.coord(axis='Z'))
print("\n")
cube = cube.intersection(longitude=(bbox[0], bbox[2]),
latitude=(bbox[1], bbox[3]))
print(cube)
fig, ax, cs = plot_surface(cube, model, unstructure=True)
cbar = fig.colorbar(cs, extend='both', shrink=0.75)
t = cbar.ax.set_title(cube.units)
# <markdowncell>
# #### Trying to subset directly takes forever...
# <codecell>
with timeit():
url = "http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/"
url += "Forecasts/NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST.nc"
cube = iris.load_cube(url, 'sea_water_potential_temperature')
cube = time_slice(cube, start, None)
cube.convert_units(units)
cube = cube.intersection(longitude=(bbox[0], bbox[2]),
latitude=(bbox[1], bbox[3]))
print(cube)
fig, ax, cs = plot_surface(cube, model, unstructure=True)
cbar = fig.colorbar(cs, extend='both', shrink=0.75)
t = cbar.ax.set_title(cube.units)
| mit |
davidtwomey/greengraphs_cw | greengraph/test/test_command.py | 1 | 1945 | from greengraph.command import parser, process, build_greengraph
from mock import mock_open, patch
# Test command line input arguments
def test_command_args():
args = parser.parse_args(['--from','London','--to','Paris','--steps','15','--out','file_name'])
assert args.startLoc == 'London'
assert args.endLoc == 'Paris'
assert args.steps == 15
assert args.out == 'file_name'
@patch('matplotlib.pyplot.show')
@patch('matplotlib.pyplot.savefig')
@patch('matplotlib.pyplot.plot')
@patch('greengraph.greengraph.Map.show_green',return_value='Test')
@patch('greengraph.greengraph.Map.count_green')
@patch('matplotlib.image.imread')
@patch('requests.get')
@patch('greengraph.greengraph.Greengraph.location_sequence',return_value=[(10.,10.),(15.,15.)])
@patch('greengraph.greengraph.Greengraph.geolocate', return_value=(10.,10))
def test_build_greengraph(mock_geolocate,mock_location_sequence,mock_get,mock_imread,mock_count_green,mock_show_green,mock_plot, mock_savefig,mock_show):
args = parser.parse_args(['--from','London','--to','Paris','--steps','1','--out','file_name'])
build_greengraph(args)
mock_location_sequence.assert_called_with((10.,10.),(10.,10.),1)
assert mock_count_green.called == True
@patch('matplotlib.pyplot.show')
@patch('matplotlib.pyplot.savefig')
@patch('matplotlib.pyplot.plot')
@patch('greengraph.greengraph.Greengraph.green_between', return_value = 'Test')
def test_build_greengraph_steps(mock_green_between,mock_savefig,mock_plot,mock_show):
args = parser.parse_args(['--to','London','--from','Amsterdam'])
build_greengraph(args)
mock_green_between.assert_called_with(20)
@patch('greengraph.command.build_greengraph')
@patch('greengraph.command.parser.parse_args',return_value='test')
def test_process(mock_parse, mock_build_greengraph):
process()
assert mock_parse.called == True
mock_build_greengraph.assert_called_with('test')
| mit |
dgrivas/pwfm | main.py | 1 | 7608 | #!/usr/bin/env python
# coding=utf-8
from __future__ import print_function
from class_galib import *
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.size'] = 10.0
import timeit
import os
import sys
MUTATION_DECREASE = 0.001 # Mutation reduction value
MUTATION_ADAPT_POINT = 0.3 # Mutation correction start (percent of generations)
FINAL_MUTATION = 0.005
CROSSOVER_ENGINEERS = 0.4
CROSSOVER_JOBS = 0.3
pop_fit = []
optimum_fit = []
mean_fit = []
def main():
# Get parameters from menu:
optimal_fitness, lifetime, popsize, rejection, mutation_probability, \
traveltime, surplus_weight, overtime_weight = main_menu()
print("Initializing...")
sys.stdout.flush()
# Initialize timer
start = timeit.default_timer()
# Create main GA object
ga = GeAl(optimal_fitness, lifetime, popsize, traveltime)
# ga.set_travel_time(TRAVELTIME)
#
# Prepare data for population (get total jobs, engineers from db):
ga.prepare_pop(CROSSOVER_ENGINEERS, CROSSOVER_JOBS)
# Generate new population of random chromosomes:
ga.generate_pop()
#
# Evolution loop:
for g in range(lifetime):
print("Evolution: %s,\tmutation probability: %s" % (g, mutation_probability), end=",\t")
sys.stdout.flush()
#
# Evaluate population:
(fitness, assignment, worktime, surplus, overtime, dispersion) = \
ga.evaluate_population(surplus_weight, overtime_weight)
update_plot_data(ga, fitness)
#
# Check optimal fitness
if fitness < optimal_fitness:
g -= 1
break
#
# Selection:
ga.prepare_selection() # prepare cumulative probability list
offsprings_nr = int(round(rejection * popsize))
offsprings = ga.individuals2replace(offsprings_nr) # get individuals to replace
# print("\nIndividuals to replace: %s" % offsprings)
#
# Crossover:
# Get parents and do crossover:
for offspring in offsprings:
parents = ga.select()
# print("kid: %s, parents: %s" % (offspring, parents))
ga.crossover(parents[0], parents[1], offspring)
pass
# ga.print_nebula()
#
# Mutation:
ga.apply_mutation(mutation_probability, newborn=True)
# Mutation correction
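        # With the menu defaults (lifetime=200, mutation_probability=0.06) the
        # correction kicks in after generation 60 (0.3 * 200); the probability
        # then drops by MUTATION_DECREASE=0.001 per generation until it reaches
        # the FINAL_MUTATION floor of 0.005, i.e. after roughly 55 generations.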
if g > (MUTATION_ADAPT_POINT * lifetime):
mutation_probability = max(mutation_probability - MUTATION_DECREASE, FINAL_MUTATION)
# print("%s, %s" % (g, mutation_probability))
#
# Integrate offsprings into next generation, update worktime:
ga.update_generation()
#
# Clear nebula for next generation
ga.clear_nebula()
pass
# Print optimum solution
stop = timeit.default_timer()
runtime = round(stop - start, 2)
print("\n\nRuntime: %ssec" % runtime)
print("Optimum fitness: %s, after %s life cycles" % (fitness, g+1))
print("Mean worktime: %smin" % (sum(worktime)/len(worktime)))
print("Optimum solution:\nAssigment:\n%s\nEngineer Worktime:\n%s" % (assignment, worktime))
#
plot_result(surplus, overtime, dispersion, runtime)
def update_plot_data(ga, fitness):
"""
Update plot data.
:param ga: GA object
:param fitness: best fitness of generation
:return:
"""
pop_fit.append(ga._pop_fitness[:]) # Collect generation fitness for diagram
    optimum_fit.append(fitness)   # Collect best fitness of each generation for diagram
mean_fit.append(sum(ga._pop_fitness)/len(ga._pop_fitness)) # Calculate mean fitness
def plot_result(surplus, overtime, dispersion, runtime):
"""
Plot GA data.
"""
fig = plt.figure()
#
# lineoffsets2 sets the increment between each data set
colors = [[0, 0, 0]]
lineoffsets = 1
linelengths = 0.5
#
# create vertical population fitness plot
pf = fig.add_subplot(211)
pf.set_xlim(0, len(pop_fit))
pf.eventplot(pop_fit, colors=colors, lineoffsets=lineoffsets,
linelengths=linelengths, orientation='vertical')
# Plot mean fitness
pf.plot(mean_fit, label='Mean Fitness')
pf.legend(loc='best', fancybox=True, framealpha=0.5)
pf.set_ylabel('Fitness')
pf.set_xlabel('Generations')
pf.set_title('Population Fitness Evolution', fontweight='bold', fontsize=12)
#
# Create best fitness plot
textstr = 'Best fitness: %s\nSurplus: %smin\nOvertime: %smin\nDispersion: %s\nRuntime: %ssec' % \
(min(optimum_fit), surplus, overtime, dispersion, runtime)
bf = fig.add_subplot(212)
bf.set_xlim(0, len(pop_fit))
bf.set_ylabel('Fitness')
bf.set_xlabel('Generations')
bf.set_title('Best Fitness', fontweight='bold', fontsize=12)
# place a text box in upper left in axes coords
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
bf.text(0.95, 0.95, textstr, transform=bf.transAxes, fontsize=10,
va='top', ha='right', bbox=props)
bf.plot(optimum_fit)
#
plt.tight_layout()
plt.show()
def main_menu():
optimal_fitness = 12
lifetime = 200 # Max GA iterations
popsize = 40 # Population size
rejection = 0.4 # Population rejection ratio
mutation_probability = 0.06 # Mutation probability
traveltime = 15
surplus_weight = 1.0
overtime_weight = 2.0
while True:
os.system('clear')
print (70 * '-')
print (" Π Α Ρ Α Μ Ε Τ Ρ Ο Ι Γ Ε Ν Ε Τ Ι Κ Ο Υ Α Λ Γ Ο Ρ Ι Θ Μ Ο Υ")
print (70 * '-')
print("\n\t[1] Optimal Fitness (%s)"
"\n\t[2] Lifetime (%s)"
"\n\t[3] Population Size (%s)"
"\n\t[4] Mutation Probability (%s)"
"\n\t[5] Rejection Ratio (%s)"
"\n\t[6] Travel Time (%s)"
"\n\t[7] Surplus Weight (%s)"
"\n\t[8] Overtime Weight (%s)"
"\n\n\t[0] Εκτέλεση αλγορίθμου" % (optimal_fitness, lifetime, popsize, mutation_probability, rejection,
traveltime, surplus_weight, overtime_weight))
try:
            selection = raw_input("\nSelect a parameter: ")
if selection =='1':
optimal_fitness = int(raw_input("Optimal Fitness: "))
elif selection == '2':
lifetime = int(raw_input("Lifetime: "))
elif selection == '3':
popsize = int(raw_input("Population Size: "))
elif selection == '4':
mutation_probability = float(raw_input("Mutation Probability: "))
elif selection == '5':
rejection = float(raw_input("Rejection Ratio: "))
elif selection == '6':
traveltime = int(raw_input("Travel Time: "))
elif selection == '7':
surplus_weight = float(raw_input("Surplus Weight: "))
elif selection == '8':
overtime_weight = float(raw_input("Overtime Weight: "))
elif selection == '0':
print("\n\n...Ο αλγόριθμος εκτελείται... Παρακαλώ περιμένετε!!!")
break
else:
print("Άγνωστη επιλογή!")
except ValueError:
print("Oops! That was no valid number. Try again...")
return optimal_fitness, lifetime, popsize, rejection, mutation_probability, \
traveltime, surplus_weight, overtime_weight
if __name__ == "__main__":
main()
| gpl-2.0 |
jjhelmus/artview | artview/__init__.py | 1 | 1265 | """
========================
ARTview (:mod:`artview`)
========================
.. _user:
################
Reference Manual
################
:Release: |release|
:Date: |today|
This guide provides details on all public functions, modules and classes
included in ARTview which a typical user will use on a regular basis.
.. toctree::
:maxdepth: 1
core
components
plugins
scripts
view
"""
# Detect if we're being called as part of ARTview setup procedure
try:
__ARTVIEW_SETUP__
except NameError:
__ARTVIEW_SETUP__ = False
if __ARTVIEW_SETUP__:
import sys as _sys
_sys.stderr.write("Running from ARTview source directory.\n")
del _sys
else:
try:
# versioning
from .version import git_revision as __git_revision__
from .version import version as __version__
except:
import warnings
warnings.warn("No ARTview Version!")
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['backend.qt4'] = 'PyQt4'
# import subpackages
from . import core
from . import components
from . import plugins
from . import scripts
from . import parser
from . import view
# define standard execution
run = scripts.scripts['standard']
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 3 | 6145 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.utils.estimator_checks import check_no_fit_attributes_set_in_init
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class ChangesDict(BaseEstimator):
def __init__(self):
self.key = 0
def fit(self, X, y=None):
X, y = check_X_y(X, y)
return self
def predict(self, X):
X = check_array(X)
self.key = 1000
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
class NoSampleWeightPandasSeriesType(BaseEstimator):
def fit(self, X, y, sample_weight=None):
# Convert data
X, y = check_X_y(X, y,
accept_sparse=("csr", "csc"),
multi_output=True,
y_numeric=True)
# Function is only called after we verify that pandas is installed
from pandas import Series
if isinstance(sample_weight, Series):
raise ValueError("Estimator does not accept 'sample_weight'"
"of type pandas.Series")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that sample_weights in fit accepts pandas.Series type
try:
from pandas import Series # noqa
msg = ("Estimator NoSampleWeightPandasSeriesType raises error if "
"'sample_weight' parameter is of type pandas.Series")
assert_raises_regex(
ValueError, msg, check_estimator, NoSampleWeightPandasSeriesType)
except ImportError:
pass
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check that estimator state does not change
# at transform/predict/predict_proba time
msg = 'Estimator changes __dict__ during predict'
assert_raises_regex(AssertionError, msg, check_estimator, ChangesDict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
def test_check_no_fit_attributes_set_in_init():
class NonConformantEstimator(object):
def __init__(self):
self.you_should_not_set_this_ = None
msg = ("By convention, attributes ending with '_'.+"
'should not be initialized in the constructor.+'
"Attribute 'you_should_not_set_this_' was found.+"
'in estimator estimator_name')
assert_raises_regex(AssertionError, msg,
check_no_fit_attributes_set_in_init,
'estimator_name',
NonConformantEstimator)
| bsd-3-clause |
fraricci/pymatgen | pymatgen/electronic_structure/tests/test_plotter.py | 4 | 14764 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
import warnings
from io import open
import scipy
from monty.os.path import which
from pymatgen.electronic_structure.core import Spin
from pymatgen.electronic_structure.cohp import CompleteCohp
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer
from pymatgen.electronic_structure.plotter import DosPlotter, BSPlotter, \
plot_ellipsoid, fold_point, plot_brillouin_zone, BSPlotterProjected, \
BSDOSPlotter, CohpPlotter, BoltztrapPlotter
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.core.structure import Structure
from pymatgen.io.vasp import Vasprun
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class DosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "complete_dos.json"), "r",
encoding='utf-8') as f:
self.dos = CompleteDos.from_dict(json.load(f))
self.plotter = DosPlotter(sigma=0.2, stack=True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 4)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Li", "Fe", "P", "O"]:
self.assertIn(el, d)
# Minimal baseline testing for get_plot. not a true test. Just checks that
# it can actually execute.
def test_get_plot(self):
# Disabling latex is needed for this test to work.
from matplotlib import rc
rc('text', usetex=False)
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
plt = self.plotter.get_plot()
self.plotter.save_plot("dosplot.png")
self.assertTrue(os.path.isfile("dosplot.png"))
os.remove("dosplot.png")
plt.close("all")
class BSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "CaO_2605_bandstructure.json"),
"r", encoding='utf-8') as f:
d = json.loads(f.read())
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotter(self.bs)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_bs_plot_data(self):
self.assertEqual(len(self.plotter.bs_plot_data()['distances'][0]), 16,
"wrong number of distances in the first branch")
self.assertEqual(len(self.plotter.bs_plot_data()['distances']), 10,
"wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()['distances']]),
160, "wrong number of distances")
self.assertEqual(self.plotter.bs_plot_data()['ticks']['label'][5], "K",
"wrong tick label")
self.assertEqual(len(self.plotter.bs_plot_data()['ticks']['label']),
19, "wrong number of tick labels")
# Minimal baseline testing for get_plot. not a true test. Just checks that
# it can actually execute.
def test_get_plot(self):
# zero_to_efermi = True, ylim = None, smooth = False,
# vbm_cbm_marker = False, smooth_tol = None
# Disabling latex is needed for this test to work.
from matplotlib import rc
rc('text', usetex=False)
plt = self.plotter.get_plot()
plt = self.plotter.get_plot(smooth=True)
plt = self.plotter.get_plot(vbm_cbm_marker=True)
self.plotter.save_plot("bsplot.png")
self.assertTrue(os.path.isfile("bsplot.png"))
os.remove("bsplot.png")
plt.close("all")
class BSPlotterProjectedTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "Cu2O_361_bandstructure.json"),
"r", encoding='utf-8') as f:
d = json.load(f)
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotterProjected(self.bs)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
# Minimal baseline testing for get_plot. not a true test. Just checks that
# it can actually execute.
def test_methods(self):
pass
# self.plotter.get_elt_projected_plots().close()
# self.plotter.get_elt_projected_plots_color().close()
# self.plotter.get_projected_plots_dots({'Cu': ['d', 's'], 'O': ['p']}).close()
# self.plotter.get_projected_plots_dots_patom_pmorb(
# {'Cu': ['dxy', 's', 'px'], 'O': ['px', 'py', 'pz']},
# {'Cu': [3, 5], 'O': [1]}
# ).close()
class BSDOSPlotterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
# Minimal baseline testing for get_plot. not a true test. Just checks that
# it can actually execute.
def test_methods(self):
v = Vasprun(os.path.join(test_dir, "vasprun_Si_bands.xml"))
p = BSDOSPlotter()
plt = p.get_plot(v.get_band_structure(
kpoints_filename=os.path.join(test_dir, "KPOINTS_Si_bands")))
plt.close()
plt = p.get_plot(v.get_band_structure(
kpoints_filename=os.path.join(test_dir, "KPOINTS_Si_bands")),
v.complete_dos)
plt.close("all")
class PlotBZTest(unittest.TestCase):
def setUp(self):
self.rec_latt = Structure.from_file(
os.path.join(test_dir, "Si.cssr")).lattice.reciprocal_lattice
self.kpath = [[[0., 0., 0.], [0.5, 0., 0.5], [0.5, 0.25, 0.75],
[0.375, 0.375, 0.75]]]
self.labels = {'\\Gamma': [0., 0., 0.], 'K': [0.375, 0.375, 0.75],
u'L': [0.5, 0.5, 0.5],
'U': [0.625, 0.25, 0.625], 'W': [0.5, 0.25, 0.75],
'X': [0.5, 0., 0.5]}
self.hessian = [[17.64757034, 3.90159625, -4.77845607],
[3.90159625, 14.88874142, 6.75776076],
[-4.77845607, 6.75776076, 12.12987493]]
self.center = [0.41, 0., 0.41]
self.points = [[0., 0., 0.], [0.5, 0.5, 0.5]]
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_bz_plot(self):
fig, ax = plot_ellipsoid(self.hessian, self.center,
lattice=self.rec_latt)
fig = plot_brillouin_zone(self.rec_latt, lines=self.kpath, labels=self.labels,
kpoints=self.points, ax=ax, show=False)
def test_fold_point(self):
self.assertTrue(
scipy.allclose(fold_point([0., -0.5, 0.5], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0., 0.5, 0.5])))
self.assertTrue(
scipy.allclose(fold_point([0.1, -0.6, 0.2], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0.1, 0.4, 0.2])))
x_trans = which("x_trans")
@unittest.skipIf(not x_trans, "No x_trans.")
class BoltztrapPlotterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_plots(self):
bz = BoltztrapAnalyzer.from_files(
os.path.join(test_dir, "boltztrap/transp/"))
plotter = BoltztrapPlotter(bz)
plotter.plot_seebeck_eff_mass_mu().close()
plotter.plot_complexity_factor_mu().close()
plotter.plot_conductivity_mu().close()
plotter.plot_power_factor_mu().close()
plotter.plot_zt_mu().close()
plotter.plot_dos().close()
# TODO: These tests fail. Whoever is responsible for the
# BoltztrapPlotter needs to fix these. The fact that there are not tests
# for the plotter is atrocious. I will reject all future additions to
# the plotter until these are fixed.
# plotter.plot_seebeck_temp()
# plotter.plot_seebeck_dop()
# plotter.plot_carriers()
# plotter.plot_conductivity_dop()
# plotter.plot_conductivity_temp()
# plotter.plot_power_factor_dop()
# plotter.plot_power_factor_temp()
# plotter.plot_eff_mass_dop()
# plotter.plot_zt_dop()
# plotter.plot_zt_temp()
class CohpPlotterTest(PymatgenTest):
def setUp(self):
path = os.path.join(test_dir, "cohp", "complete_cohp_lobster.json")
with open(os.path.join(path), "r") as f:
self.cohp = CompleteCohp.from_dict(json.load(f))
path = os.path.join(test_dir, "cohp", "complete_coop_lobster.json")
with open(os.path.join(path), "r") as f:
self.coop = CompleteCohp.from_dict(json.load(f))
self.cohp_plot = CohpPlotter(zero_at_efermi=False)
self.coop_plot = CohpPlotter(are_coops=True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_attributes(self):
self.assertFalse(self.cohp_plot.are_coops)
self.assertTrue(self.coop_plot.are_coops)
self.assertFalse(self.cohp_plot.zero_at_efermi)
self.assertTrue(self.coop_plot.zero_at_efermi)
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
cohp_energies = self.cohp_plot._cohps["1"]["energies"]
self.assertEqual(len(cohp_energies), 301)
self.assertAlmostEqual(cohp_energies[0], -0.27768)
self.assertAlmostEqual(cohp_energies[-1], 14.77248)
self.coop_plot.add_cohp_dict(self.coop.all_cohps)
coop_energies = self.coop_plot._cohps["10"]["energies"]
self.assertEqual(len(coop_energies), 241)
self.assertAlmostEqual(coop_energies[0], -6.02510)
self.assertAlmostEqual(coop_energies[-1], 6.02510)
def test_add_cohp_dict(self):
# Sorts the populations by z-coordinates of the sites
def sortkeys(sites):
return sites[0].z, sites[1].z
sorted_keys = ["3", "4", "7", "8",
"9", "10", "11", "6",
"5", "2", "1"]
d_coop = self.coop_plot.get_cohp_dict()
self.assertEqual(len(d_coop), 0)
bonds = self.coop.bonds
self.coop_plot.add_cohp_dict(self.coop.all_cohps,
key_sort_func=lambda x:
sortkeys(bonds[x]["sites"]))
d_coop = self.coop_plot.get_cohp_dict()
self.assertEqual(len(d_coop), 11)
self.assertEqual(list(self.coop_plot._cohps.keys()), sorted_keys)
def test_get_cohp_dict(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
d_cohp = self.cohp_plot.get_cohp_dict()
for bond in ["1", "2"]:
self.assertIn(bond, d_cohp)
def test_get_plot(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
plt_cohp = self.cohp_plot.get_plot()
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "-COHP")
self.assertEqual(ax_cohp.get_ylabel(), "$E$ (eV)")
legend_labels = ax_cohp.get_legend_handles_labels()[1]
self.assertEqual(len(self.cohp_plot._cohps), len(legend_labels))
self.assertEqual(ax_cohp.lines[0].get_linestyle(), "-")
self.assertEqual(ax_cohp.lines[1].get_linestyle(), "--")
for label in legend_labels:
self.assertIn(label, self.cohp_plot._cohps)
linesindex = legend_labels.index("1")
linestyles = {Spin.up: '-', Spin.down: '--'}
cohp_fe_fe = self.cohp.all_cohps["1"]
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(),
-cohp_fe_fe.cohp[spin])
self.assertArrayAlmostEqual(lines.get_ydata(), self.cohp.energies)
self.assertEqual(lines.get_linestyle(), linestyles[spin])
plt_cohp.close()
plt_cohp = self.cohp_plot.get_plot(invert_axes=False,
plot_negative=False)
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "$E$ (eV)")
self.assertEqual(ax_cohp.get_ylabel(), "COHP")
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(), self.cohp.energies)
self.assertArrayAlmostEqual(lines.get_ydata(),
cohp_fe_fe.cohp[spin])
plt_cohp.close()
plt_cohp = self.cohp_plot.get_plot(integrated=True)
ax_cohp = plt_cohp.gca()
self.assertEqual(ax_cohp.get_xlabel(), "-ICOHP (eV)")
for s, spin in enumerate([Spin.up, Spin.down]):
lines = ax_cohp.lines[2 * linesindex + s]
self.assertArrayAlmostEqual(lines.get_xdata(),
-cohp_fe_fe.icohp[spin])
coop_dict = {"Bi5-Bi6": self.coop.all_cohps["10"]}
self.coop_plot.add_cohp_dict(coop_dict)
plt_coop = self.coop_plot.get_plot()
ax_coop = plt_coop.gca()
self.assertEqual(ax_coop.get_xlabel(), "COOP")
self.assertEqual(ax_coop.get_ylabel(), "$E - E_f$ (eV)")
lines_coop = ax_coop.get_lines()[0]
self.assertArrayAlmostEqual(lines_coop.get_ydata(),
self.coop.energies - self.coop.efermi)
coop_bi_bi = self.coop.all_cohps["10"].cohp[Spin.up]
self.assertArrayAlmostEqual(lines_coop.get_xdata(), coop_bi_bi)
# Cleanup.
plt_cohp.close()
plt_coop.close("all")
def test_save_plot(self):
self.cohp_plot.add_cohp_dict(self.cohp.all_cohps)
plt_cohp = self.cohp_plot.get_plot()
self.cohp_plot.save_plot("cohpplot.png")
self.assertTrue(os.path.isfile("cohpplot.png"))
os.remove("cohpplot.png")
plt_cohp.close("all")
if __name__ == "__main__":
unittest.main()
| mit |
RichardLeeK/MachineLearning | ml/sequential_selection.py | 2 | 1787 | class SBS():
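  """Sequential Backward Selection (SBS): greedily removes one feature at a
  time until only k_features remain, keeping at each step the feature subset
  with the best hold-out score under the supplied scoring function."""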
from sklearn.metrics import accuracy_score
def __init__(self, estimator, k_features, scoring=accuracy_score,
test_size=0.25, random_state=1):
from sklearn.base import clone
self.scoring = scoring
self.estimator = clone(estimator)
self.k_features = k_features
self.test_size = test_size
self.random_state = random_state
def fit(self, X, y):
from itertools import combinations
import numpy as np
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size,
random_state=self.random_state)
dim = X_train.shape[1]
    # initial score with all features
self.indices_ = list(range(dim))
self.subsets_ = [self.indices_]
score = self._calc_score(X_train, y_train,
X_test, y_test, self.indices_)
self.scores_ = [score]
#reducing the number of features
while dim > self.k_features:
scores = []
subsets = []
for c in combinations(self.indices_, r = dim - 1):
score = self._calc_score(X_train, y_train,
X_test, y_test, list(c))
scores.append(score)
subsets.append(c)
best = np.argmax(scores)
self.indices_ = subsets[best]
self.subsets_.append(self.indices_)
dim -= 1
self.scores_.append(scores[best])
self.k_score_ = self.scores_[-1]
return self
def transform(self, X):
return X[:, self.indices_]
def _calc_score(self, X_train, y_train, X_test, y_test, indices):
self.estimator.fit(X_train[:, indices], y_train)
y_pred = self.estimator.predict(X_test[:, indices])
score = self.scoring(y_test, y_pred)
return score | mit |
techtonik/docrepr | docrepr/sphinxify.py | 1 | 12785 | # -*- coding: utf-8 -*-
"""
Process docstrings with Sphinx
AUTHORS:
- Tim Joseph Dumol (2009-09-29): initial version
- The Spyder Development Team: Maintenance
Copyright (C) 2009 Tim Dumol <tim@timdumol.com>
Copyright (C) 2013- The Spyder Development Team
Distributed under the terms of the BSD License
Taken from the Sage project (www.sagemath.org).
See here for the original version:
http://doc.sagemath.org/html/en/reference/notebook/sagenb/misc/sphinxify.html
"""
# Stdlib imports
import codecs
import inspect
import os
import os.path as osp
import re
import shutil
import sys
import tempfile
from xml.sax.saxutils import escape
# 3rd party imports
from docutils.utils import SystemMessage as SystemMessage
from jinja2 import Environment, FileSystemLoader
import sphinx
from sphinx.application import Sphinx
# Local imports
from . import options
from .utils import PY2, to_unicode_from_fs, to_binary_string
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
CONFDIR_PATH = osp.dirname(__file__)
CSS_PATH = osp.join(CONFDIR_PATH, 'static', 'css')
JS_PATH = osp.join(CONFDIR_PATH, 'js')
JQUERY_PATH = JS_PATH
if os.name == 'nt':
CACHEDIR = tempfile.gettempdir() + osp.sep + 'docrepr'
else:
username = to_unicode_from_fs(os.environ.get('USER'))
CACHEDIR = tempfile.gettempdir() + osp.sep + 'docrepr-' + username
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def is_sphinx_markup(docstring):
"""Returns whether a string contains Sphinx-style ReST markup."""
# this could be made much more clever
return ("`" in docstring or "::" in docstring)
def warning(message):
"""Print a warning message on the rich text view"""
env = Environment()
env.loader = FileSystemLoader(osp.join(CONFDIR_PATH, 'templates'))
warning = env.get_template("warning.html")
return warning.render(css_path=CSS_PATH, text=message)
def format_argspec(argspec):
"""Format argspect, convenience wrapper around inspect's.
This takes a dict instead of ordered arguments and calls
inspect.format_argspec with the arguments in the necessary order.
"""
return inspect.formatargspec(argspec['args'], argspec['varargs'],
argspec['varkw'], argspec['defaults'])
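# For example, format_argspec({'args': ['x', 'y'], 'varargs': None,
# 'varkw': None, 'defaults': (1,)}) returns "(x, y=1)".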
def getsignaturefromtext(text, objname):
"""Get object signatures from text (object documentation)
Return a list containing a single string in most cases
Example of multiple signatures: PyQt4 objects
"""
# Default values
if not text:
text = ''
if not objname:
objname = ''
# Regexps
oneline_re = objname + r'\([^\)].+?(?<=[\w\]\}\'"])\)(?!,)'
multiline_re = objname + r'\([^\)]+(?<=[\w\]\}\'"])\)(?!,)'
multiline_end_parenleft_re = r'(%s\([^\)]+(\),\n.+)+(?<=[\w\]\}\'"])\))'
# Grabbing signatures
sigs_1 = re.findall(oneline_re + '|' + multiline_re, text)
sigs_2 = [g[0] for g in re.findall(multiline_end_parenleft_re % objname, text)]
all_sigs = sigs_1 + sigs_2
# The most relevant signature is usually the first one. There could be
# others in doctests but those are not so important
if all_sigs:
sig = all_sigs[0]
sig = '(' + sig.split('(')[-1] # Remove objname
return sig
else:
return ''
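# Rough illustration (not taken from any real docstring): for text such as
# "foo(x, y=2)\n\nDo something." with objname "foo", the regexps above match
# "foo(x, y=2)" and the function returns "(x, y=2)"; if no call signature is
# found in the text, an empty string is returned.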
def generate_conf(directory):
"""
Generates a Sphinx configuration file in `directory`.
Parameters
----------
directory : str
Base directory to use
"""
# conf.py file for Sphinx
conf = osp.join(CONFDIR_PATH, 'conf.py')
# Docstring layout page (in Jinja):
layout = osp.join(osp.join(CONFDIR_PATH, 'templates'), 'layout.html')
os.makedirs(osp.join(directory, 'templates'))
os.makedirs(osp.join(directory, 'static'))
shutil.copy(conf, directory)
shutil.copy(layout, osp.join(directory, 'templates'))
open(osp.join(directory, '__init__.py'), 'w').write('')
open(osp.join(directory, 'static', 'empty'), 'w').write('')
def global_template_vars():
"""Generate a dictionary of global variables for our templates"""
if options['local_mathjax']:
# TODO: Fix local use of MathJax
MATHJAX_PATH = "file:///" + osp.join(JS_PATH, 'mathjax')
else:
MATHJAX_PATH = "https://cdn.mathjax.org/mathjax/latest"
global_vars = \
{
'css_path': CSS_PATH,
'js_path': JS_PATH,
'jquery_path': JQUERY_PATH,
'mathjax_path': MATHJAX_PATH,
'math_on': 'true' if options['render_math'] else '',
'platform': sys.platform,
'collapse': options['collapse_sections'],
'use_qt4': options['use_qt4'],
'outline': options['outline']
}
return global_vars
def init_template_vars(oinfo):
"""
Initialize variables for our templates.
It gives default values to the most important variables
"""
tmpl_vars = global_template_vars()
# Object name
if oinfo['name'] is None:
tmpl_vars['name'] = ''
else:
tmpl_vars['name'] = oinfo['name']
# Argspec
tmpl_vars['argspec'] = ''
if oinfo['argspec'] is None:
argspec = getsignaturefromtext(oinfo['docstring'], oinfo['name'])
if argspec:
tmpl_vars['argspec'] = argspec
else:
argspec = oinfo['argspec']
try:
has_self = argspec['args'][0] == 'self'
except (KeyError, IndexError):
fmt_argspec = getsignaturefromtext(oinfo['docstring'], oinfo['name'])
if fmt_argspec:
tmpl_vars['argspec'] = fmt_argspec
else:
tmpl_vars['argspec'] = '(...)'
else:
if has_self:
argspec['args'] = argspec['args'][1:]
tmpl_vars['argspec'] = format_argspec(argspec)
# Type
if oinfo['type_name'] is None:
tmpl_vars['note'] = ''
else:
tmpl_vars['note'] = '%s' % oinfo['type_name']
return tmpl_vars
def generate_extensions(render_math):
"""Generate list of Sphinx extensions"""
# We need jsmath to get pretty plain-text latex in docstrings
extensions = []
if sphinx.__version__ < "1.1" or not render_math:
extensions = ['sphinx.ext.jsmath']
else:
extensions = ['sphinx.ext.mathjax']
# For scipy and matplotlib docstrings, which need this extension to
# be rendered correctly (see Spyder Issue #1138)
extensions.append('sphinx.ext.autosummary')
# Plots
try:
# TODO: Add an option to avoid importing mpl every time
import matplotlib # analysis:ignore
extensions.append('plot_directive')
except ImportError:
pass
return extensions
#-----------------------------------------------------------------------------
# Sphinxify
#-----------------------------------------------------------------------------
def sphinxify(docstring, srcdir, output_format='html', temp_confdir=False):
"""
Runs Sphinx on a docstring and outputs the processed content
Parameters
----------
docstring : str
a ReST-formatted docstring
srcdir : str
Source directory where Sphinx is going to be run
output_format: str
It can be either `html` or `text`.
temp_confdir : bool
Whether to create a temp conf dir for Sphinx
Returns
-------
An Sphinx-processed string, in either HTML or plain text format, depending
on the value of `output_format`
"""
if docstring is None:
docstring = ''
# Rst file to sphinxify
base_name = osp.join(srcdir, 'docstring')
rst_name = base_name + '.rst'
# Output file name
if output_format == 'html':
suffix = '.html'
else:
suffix = '.txt'
output_name = base_name + suffix
# This is needed so users can type \\ on latex eqnarray envs inside raw
# docstrings
template_vars = global_template_vars()
if template_vars['math_on']:
docstring = docstring.replace('\\\\', '\\\\\\\\')
if not docstring or docstring == '<no docstring>':
template_vars['warning'] = 'true'
template_vars['warn_message'] = "No documentation available"
# Write docstring to rst_name
with codecs.open(rst_name, 'w', encoding='utf-8') as rst_file:
rst_file.write(docstring)
# Create confdir
if temp_confdir:
# TODO: This may be inefficient. Find a faster way to do it.
confdir = tempfile.mkdtemp()
confdir = to_unicode_from_fs(confdir)
generate_conf(confdir)
else:
confdir = CONFDIR_PATH
# Get extensions list
extensions = generate_extensions(options['render_math'])
# Override conf variables
confoverrides = {'html_context': template_vars, 'extensions': extensions}
# Create Sphinx app
doctreedir = osp.join(srcdir, 'doctrees')
sphinx_app = Sphinx(srcdir, confdir, srcdir, doctreedir, output_format,
confoverrides, status=None, warning=None,
freshenv=True, warningiserror=False, tags=None)
# Run the app
try:
sphinx_app.build(None, [rst_name])
except SystemMessage:
# TODO: Make this message configurable, so that it can be translated
error_message = "It was not possible to get rich help for this object"
output = warning(error_message)
return output
# Some adjustments to the output
if osp.exists(output_name):
output = codecs.open(output_name, 'r', encoding='utf-8').read()
output = output.replace('<pre>', '<pre class="literal-block">')
else:
error_message = "It was not possible to get rich help for this object"
output = warning(error_message)
# Remove temp confdir
if temp_confdir:
shutil.rmtree(confdir, ignore_errors=True)
# Return contents
return output
def rich_repr(oinfo):
"""
Generate a rich representation of an object's docstring and several
other metadata associated with it.
These data are contained in an `oinfo` dict, as computed by the
IPython.core.oinspect library
Parameters
----------
oinfo : dict
An object info dictionary
Returns
-------
The url of the page that contains the rich representation
"""
# Create srcdir
if not osp.isdir(CACHEDIR):
os.mkdir(CACHEDIR)
srcdir = tempfile.mkdtemp(dir=CACHEDIR)
srcdir = to_unicode_from_fs(srcdir)
output_file = osp.join(srcdir, 'rich_repr_output.html')
template_vars = init_template_vars(oinfo)
    # Sphinxified docstring contents
obj_doc = sphinxify(oinfo['docstring'], srcdir)
template_vars['docstring'] = obj_doc
if oinfo.get('class_docstring'):
class_doc = sphinxify(oinfo['class_docstring'], srcdir)
template_vars['class_docstring'] = class_doc
else:
template_vars['class_docstring'] = ''
# Add link to docs.python.org
# TODO: Make this really work (e.g. for the math module)
template_vars['docs_py_org'] = ''
file_def = oinfo.get('file')
if file_def:
lib_dirs = ['site-packages', 'dist-packages', 'pymodules']
if not any([d in file_def for d in lib_dirs]):
mod = file_def.split(os.sep)[-1]
mod_name = mod.split('.')[0]
if PY2:
link = "https://docs.python.org/2/library/{0}.html#{0}.{1}".format(
mod_name, oinfo['name'])
else:
link = "https://docs.python.org/3/library/{0}.html#{0}.{1}".format(
mod_name, oinfo['name'])
template_vars['docs_py_org'] = link
# Add a class to several characters on the argspec. This way we can
# highlight them using css, in a similar way to what IPython does.
# NOTE: Before doing this, we escape common html chars so that they
# don't interfere with the rest of html present in the page
argspec = escape(template_vars['argspec'])
for char in ['=', ',', '(', ')', '*', '**']:
argspec = argspec.replace(char,
'<span class="argspec-highlight">' + char + '</span>')
template_vars['argspec'] = argspec
# Replace vars on the template
env = Environment()
env.loader = FileSystemLoader(osp.join(CONFDIR_PATH, 'templates'))
page = env.get_template("rich_repr.html")
output = page.render(**template_vars)
# Rewrite output contents after adjustments
with open(output_file, 'wb') as f:
f.write(to_binary_string(output, encoding='utf-8'))
# Return output file name
return output_file
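# A minimal usage sketch, not part of the original module: the keys below are
# exactly the ones accessed by init_template_vars/rich_repr above, the values
# are illustrative, and running it assumes Sphinx plus the bundled templates
# and CSS are available on this machine.
if __name__ == "__main__":
    example_oinfo = {
        'name': 'squared',
        'argspec': None,
        'docstring': 'Return the square of ``x``.\n',
        'type_name': 'function',
        'class_docstring': None,
        'file': None,
    }
    # Prints the path of the generated HTML page.
    print(rich_repr(example_oinfo))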
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_NonLinHardSoftShear/Shear_Zone_Length/SZ_h_1e3/Normalized_Shear_Stress_Plot.py | 24 | 3505 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(shear_strain,shear_stress/normal_stress,'-r',label='Analytical Solution', Linewidth=4)
plt.xlabel(r"Shear Strain $\gamma $")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Tangential_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain,shear_stress/normal_stress,'-k',label='Numerical Solution', Linewidth=4)
plt.xlabel(r"Shear Strain $\gamma $")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Normalized_Shear_Stress.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
nhejazi/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 36 | 2900 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.2, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.2, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0))
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
walterreade/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
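# For example, rmse(np.array([1., 2.]), np.array([1., 3.])) == sqrt(0.5) ~= 0.707.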
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
tjof2/robustpca | robustpca/rosl.py | 1 | 6653 | # -*- coding: utf-8 -*-
# Copyright 2015-2020 Tom Furnival
#
# This file is part of robustpca.
#
# robustpca is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# robustpca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with robustpca. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from ._rosl import rosl_all_double, rosl_all_float
class ROSL(BaseEstimator, TransformerMixin):
"""Robust Orthonormal Subspace Learning (ROSL).
Robust Orthonormal Subspace Learning seeks to recover a low-rank matrix X
and a sparse error matrix E from a corrupted observation Y:
min ||X||_* + lambda ||E||_1 subject to Y = X + E
where ||.||_* is the nuclear norm, and ||.||_1 is the l1-norm. ROSL further models
the low-rank matrix X as spanning an orthonormal subspace D with coefficients A:
X = D*A
Further information can be found in the paper [Shu2014]_.
Parameters
----------
subsampling : None or float or tuple(float, float)
* If None, use full data matrix
* If float, use a random fraction of the data (ROSL+ algorithm)
* If tuple of floats, use a random fraction of the columns and rows (ROSL+ algorithm)
n_components : int, optional
Initial estimate of data dimensionality.
lambda1 : float
Regularization parameter on l1-norm (sparse error term).
Default is 0.01.
tol : float
Stopping criterion for iterative algorithm. Default is 1e-6.
max_iter : int
Maximum number of iterations. Default is 500.
copy : bool, default False
If True, fit on a copy of the data.
random_seed : None or int
Random seed used to sample the data and initialize the starting point.
Default is None.
Attributes
----------
low_rank_ : array, [n_samples, n_features]
The results of the ROSL decomposition.
error_ : array, [n_samples, n_features]
The error in the model.
References
----------
.. [Shu2014] X. Shu, F. Porikli and N. Ahuja, "Robust Orthonormal Subspace Learning:
Efficient Recovery of Corrupted Low-Rank Matrices," 2014 IEEE Conference on
Computer Vision and Pattern Recognition, Columbus, OH, 2014, pp. 3874-3881,
DOI: 10.1109/CVPR.2014.495.
"""
def __init__(
self,
n_components=None,
subsampling=None,
lambda1=0.01,
max_iter=500,
tol=1e-6,
copy=False,
random_seed=None,
):
self.n_components = n_components
self.subsampling = subsampling
self.lambda1 = lambda1
self.max_iter = max_iter
self.tol = tol
self.copy = copy
self.random_seed = random_seed
def _fit(self, X):
"""Build a model of data X.
Parameters
----------
X : array [n_samples, n_features]
The data to be modelled
Returns
-------
loadings : array [n_samples, n_features]
The subspace coefficients
components : array [n_components, n_features]
The subspace basis
"""
X = check_array(X, copy=self.copy, order="F", dtype=[np.float64, np.float32])
self.n_samples, self.n_features = X.shape
if self.lambda1 is None:
self.lambda1_ = 1.0 / np.sqrt(self.n_features)
else:
self.lambda1_ = self.lambda1
if self.n_components is None:
self.n_components = self.n_features
self.max_iter = int(self.max_iter)
if self.subsampling is None:
sampling = False
s1, s2 = (1.0, 1.0)
elif isinstance(self.subsampling, tuple):
if len(self.subsampling) != 2:
raise ValueError(
"Invalid subsampling parameter: got tuple of len="
f"{len(self.subsampling)} instead of a tuple of len=2."
)
sampling = True
s1, s2 = self.subsampling
else:
sampling = True
s1 = self.subsampling
s2 = self.subsampling
if s1 > 1.0 or s1 < 0.0 or s2 > 1.0 or s2 < 0.0:
raise ValueError(
f"Invalid subsampling parameter: got {self.subsampling} "
"instead of a float or pair of floats between 0 and 1."
)
if self.random_seed is None:
# Negative integer used to seed randomly in C++
self.random_seed_ = -1
else:
self.random_seed_ = self.random_seed
if X.dtype == np.float64:
f = rosl_all_double
elif X.dtype == np.float32:
f = rosl_all_float
A, E, D, B, rank_est = f(
X,
self.lambda1_,
self.tol,
sampling,
self.n_components,
self.max_iter,
s1,
s2,
self.random_seed_,
)
self.n_components_ = int(rank_est)
self.loadings_ = D[:, : self.n_components_]
self.components_ = B[: self.n_components_]
self.error_ = E
self.low_rank_ = A
return self.loadings_, self.components_
def fit(self, X, y=None):
"""Build a model of data X.
Parameters
----------
X : array [n_samples, n_features]
The data to be modelled.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def transform(self, Y):
"""Apply the learned model to data Y.
Parameters
----------
Y : array [n_samples, n_features]
The data to be transformed
Returns
-------
Y_transformed : array [n_samples, n_features]
The coefficients of the Y data when projected on the
learned basis.
"""
check_is_fitted(self, "n_components_")
Y = check_array(Y)
return Y @ self.components_.T
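# A minimal usage sketch, not part of the original module: it assumes the
# compiled ._rosl extension is importable and uses purely synthetic data.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    low_rank = rng.randn(100, 5) @ rng.randn(5, 80)      # rank-5 component
    outliers = 10.0 * (rng.rand(100, 80) < 0.05)         # sparse corruption
    model = ROSL(n_components=10, lambda1=0.03, random_seed=0)
    model.fit(low_rank + outliers)
    print("estimated rank:", model.n_components_)
    print("sparse error shape:", model.error_.shape)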
| gpl-3.0 |
natanielruiz/android-yolo | jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/rnn.py | 6 | 10199 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
def null_input_op_fn(x):
"""This function does no transformation on the inputs, used as default."""
return x
class TensorFlowRNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
"""TensorFlow RNN Classifier model."""
def __init__(self,
rnn_size,
n_classes,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
class_weight=None,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNClassifier instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
````
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are
supposed to have weight one.
continue_training: when continue_training is True, once initialized
        model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNClassifier, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
class_weight=class_weight,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.logistic_regression,
self.sequence_length, self.initial_state,
self.attn_length, self.attn_size,
self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('logistic_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('logistic_regression/weights')
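# Usage sketch (illustrative only; relies on the deprecated tf.contrib.learn
# interface inherited from TensorFlowEstimator, with made-up sizes):
#   classifier = TensorFlowRNNClassifier(rnn_size=16, n_classes=2,
#                                        cell_type='lstm', steps=500)
#   classifier.fit(x_train, y_train)   # x_train is first passed through input_op_fn
#   y_pred = classifier.predict(x_test)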
class TensorFlowRNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
"""TensorFlow RNN Regressor model."""
def __init__(self,
rnn_size,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
n_classes=0,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNRegressor instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
````
continue_training: when continue_training is True, once initialized
        model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
* 0: the algorithm and debug information is muted.
* 1: trainer prints the progress.
* 2: log device placement is printed.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNRegressor, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.linear_regression, self.sequence_length,
self.initial_state, self.attn_length,
self.attn_size, self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('linear_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('linear_regression/weights')
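# The regressor is driven the same way as the classifier above, but with a
# linear_regression head and n_classes=0; e.g. (illustrative):
#   regressor = TensorFlowRNNRegressor(rnn_size=16, cell_type='gru', steps=500)
#   regressor.fit(x_train, y_train)
#   y_pred = regressor.predict(x_test)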
| apache-2.0 |
scikit-image/skimage-demos | mm_color_select.py | 1 | 2262 | # Based on a blog post by Steve Eddins:
# http://blogs.mathworks.com/steve/2010/12/23/two-dimensional-histograms/
from skimage import io, color, exposure
import numpy as np
url = 'http://blogs.mathworks.com/images/steve/2010/mms.jpg'
import os
if not os.path.exists('mm.jpg'):
print("Downloading M&M's...")
from urllib.request import urlretrieve
urlretrieve(url, 'mm.jpg')
mm = io.imread('mm.jpg')
mm_lab = color.rgb2lab(mm)
L, a, b = mm_lab.T
left, right = -100, 100
bins = np.arange(left, right)
H, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(), bins,
normed=True)
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
from matplotlib.patches import Rectangle
f = plt.figure()
ax0 = plt.subplot2grid((2, 2), (0, 0))
ax1 = plt.subplot2grid((2, 2), (0, 1), rowspan=2)
ax2 = plt.subplot2grid((2, 2), (1, 0))
f.suptitle('Select values by dragging the mouse on the histogram')
ax0.imshow(mm)
ax0.set_title('Input')
ax0.set_xticks([])
ax0.set_yticks([])
ax1.imshow(exposure.rescale_intensity(H, in_range=(0, 5e-4)),
extent=[left, right, right, left], cmap=plt.cm.gray)
ax1.set_title('Histogram')
ax1.set_xlabel('b')
ax1.set_ylabel('a')
rectprops = dict(
facecolor='gray',
edgecolor='white',
alpha=0.3
)
selected_rectangle = Rectangle((0, 0), 0, 0, transform=ax1.transData,
**rectprops)
ax1.add_patch(selected_rectangle)
result = ax2.imshow(mm)
ax2.set_title('L + masked a, b')
def histogram_select(e_click, e_release):
x0, y0 = e_click.xdata, e_click.ydata
x1, y1 = e_release.xdata, e_release.ydata
x0, x1 = min(x0, x1), max(x0, x1)
y0, y1 = min(y0, y1), max(y0, y1)
selected_rectangle.set_xy((x0, y0))
selected_rectangle.set_height(y1 - y0)
selected_rectangle.set_width(x1 - x0)
green_mm_lab = mm_lab.copy()
L, a, b = green_mm_lab.T
mask = ((a > y0) & (a < y1)) & ((b > x0) & (b < x1))
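    # Zeroing the a/b channels outside the selected (a, b) rectangle keeps only
    # the L channel there, so unselected pixels render as grayscale while the
    # selected colours are preserved (hence the 'L + masked a, b' title).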
green_mm_lab[..., 1:][~mask.T] = 0
green_mm = color.lab2rgb(green_mm_lab)
result.set_data(green_mm)
f.canvas.draw()
rs = RectangleSelector(ax1, histogram_select, drawtype='box',
spancoords='data', rectprops=rectprops)
plt.show()
| bsd-3-clause |
nixingyang/Miscellaneous-Projects | Deep Image Prior/solution.py | 1 | 4593 | import matplotlib
matplotlib.use("Agg")
import os
import cv2
import pylab
import numpy as np
from keras.callbacks import Callback, ModelCheckpoint
from load_model import init_model
from load_dataset import init_batch_generators
# Data
IMAGE_URL = "https://avatars.githubusercontent.com/u/15064790?s=460&v=4"
IMAGE_SIZE = (256, 256)
MEAN = 0
STANDARD_DEVIATION = 10
# Training
LEARNING_RATE = 0.0001
FILTERS_LIST = [32, 64, 128, 256, 512]
STEPS_PER_EPOCH = 100
EPOCHS = 1000000
# Output
OUTPUT_FOLDER_PATH = os.path.join("/tmp", __file__.split(os.sep)[-2])
OPTIMAL_WEIGHTS_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH,
"optimal weights")
PREDICTIONS_FOLDER_PATH = os.path.join(OUTPUT_FOLDER_PATH, "predictions")
class InspectLoss(Callback):
def __init__(self):
super(InspectLoss, self).__init__()
self.train_loss_list = []
self.valid_loss_list = []
def on_epoch_end(self, epoch, logs=None):
train_loss = logs.get("loss")
valid_loss = logs.get("val_loss")
self.train_loss_list.append(train_loss)
self.valid_loss_list.append(valid_loss)
epoch_index_array = np.arange(len(self.train_loss_list)) + 1
pylab.figure()
pylab.plot(epoch_index_array,
self.train_loss_list,
"yellowgreen",
label="train_loss")
pylab.plot(epoch_index_array,
self.valid_loss_list,
"lightskyblue",
label="valid_loss")
pylab.grid()
pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102),
loc=2,
ncol=2,
mode="expand",
borderaxespad=0.)
pylab.savefig(os.path.join(OUTPUT_FOLDER_PATH, "loss_curve.png"))
pylab.close()
class InspectPredictions(Callback):
def __init__(self, batch_generator):
super(InspectPredictions, self).__init__()
self.batch_generator = batch_generator
def visualize_image_content(self, image_content_array, epoch, split_name):
for image_index, image_content in enumerate(
(image_content_array * 255).astype(np.uint8), start=1):
image_file_path = os.path.join(
PREDICTIONS_FOLDER_PATH,
"epoch_{}_sample_{}_{}.png".format(epoch + 1, image_index,
split_name))
cv2.imwrite(image_file_path, image_content)
def on_epoch_end(self, epoch, logs=None):
input_image_content_array, _ = next(self.batch_generator)
predicted_image_content_array = self.model.predict_on_batch(
input_image_content_array)
self.visualize_image_content(predicted_image_content_array,
epoch,
split_name="prediction")
def run():
print("Creating folders ...")
for folder_path in [OPTIMAL_WEIGHTS_FOLDER_PATH, PREDICTIONS_FOLDER_PATH]:
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
print("Output folder is {}".format(OUTPUT_FOLDER_PATH))
print("Initiating the model ...")
model = init_model(IMAGE_SIZE, FILTERS_LIST, LEARNING_RATE,
os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "model.png"))
print("Initiating the batch generators ...")
train_batch_generator, valid_batch_generator, inspection_batch_generator = init_batch_generators(
IMAGE_URL, IMAGE_SIZE, MEAN, STANDARD_DEVIATION, OUTPUT_FOLDER_PATH)
print("Performing the training procedure ...")
modelcheckpoint_callback = ModelCheckpoint(
os.path.join(OPTIMAL_WEIGHTS_FOLDER_PATH, "model.h5"),
monitor="val_loss",
save_best_only=True,
save_weights_only=False,
        verbose=1)  # NB: Theoretically, ground truth might be unavailable.
inspectloss_callback = InspectLoss()
inspectpredictions_callback = InspectPredictions(inspection_batch_generator)
model.fit_generator(generator=train_batch_generator,
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=valid_batch_generator,
validation_steps=1,
callbacks=[
modelcheckpoint_callback, inspectloss_callback,
inspectpredictions_callback
],
epochs=EPOCHS,
verbose=2)
print("All done!")
if __name__ == "__main__":
run()
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/mixture/tests/test_dpgmm.py | 12 | 2594 | import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from sklearn.mixture.tests.test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
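    # Exponentiating and renormalizing should cancel the constant factor of 2,
    # so log_normalize(a) is expected to recover the original distribution v.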
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
| mit |
aflaxman/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 81 | 2525 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired,
edgecolor='black', s=20)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
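        # The OvR decision boundary for class c satisfies
        #     coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0,
        # so x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1], which is what
        # line() computes below.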
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
sgenoud/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 3 | 5476 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.sparse.Lasso`, that uses the
coordinate descent algorithm. Importantly, this implementation is more
computationally efficient on a sparse matrix, as the projection operator
used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print __doc__
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: Simplified BSD
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model.sparse import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
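    # Linear interpolation weights: e.g. x = 2.3 with dx=1, orig=0 contributes
    # to bins 2 and 3 with weights 0.7 and 0.3 respectively.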
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
l_x = float(l_x)
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
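# For instance (illustrative), build_projection_operator(64, 8) returns a sparse
# matrix with roughly 8 * 64 rows (one per angle/detector-bin pair) and
# 64 ** 2 columns (one per image pixel).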
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
jreeder/avoplot | src/avoplot/__init__.py | 3 | 3736 | #Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import warnings
import sys
import collections
import matplotlib
matplotlib.use('WXAgg')
####################################################################
# Program Information
####################################################################
VERSION = "14.01" #year.month of release
AUTHOR = 'Nial Peters'
AUTHOR_EMAIL = 'nonbiostudent@hotmail.com'
URL = 'http://code.google.com/p/avoplot/'
PROG_SHORT_NAME = 'AvoPlot'
PROG_LONG_NAME = 'AvoPlot'
SHORT_DESCRIPTION = 'Plot scientific data'
LONG_DESCRIPTION = ''
COPYRIGHT = 'Copyright (C) Nial Peters 2013'
COPY_PERMISSION =(
'\n%s is free software: you can redistribute it and/or modify\n'
'it under the terms of the GNU General Public License as published by\n'
'the Free Software Foundation, either version 3 of the License, or\n'
'(at your option) any later version.\n'
'\n'
'%s is distributed in the hope that it will be useful,\n'
'but WITHOUT ANY WARRANTY; without even the implied warranty of\n'
'MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n'
'GNU General Public License for more details.\n'
'\n'
'You should have received a copy of the GNU General Public License\n'
'along with %s. If not, see <http://www.gnu.org/licenses/>.\n'
''%(PROG_SHORT_NAME, PROG_SHORT_NAME, PROG_SHORT_NAME))
SRC_FILE_HEADER = ('#%s\n\nThis file is part of %s.\n\n%s'
''%(COPYRIGHT, PROG_SHORT_NAME,
COPY_PERMISSION)).replace('\n','\n#')
####################################################################
def get_avoplot_rw_dir():
"""
Returns the path used by AvoPlot for things like caching settings,
storing templates etc. This is platform dependent, but on Linux it
will be in ~/.AvoPlot
"""
if sys.platform == 'win32':
#Windows doesn't really do hidden directories, so get rid of the dot
return os.path.join(os.path.expanduser('~'),"%s"%PROG_SHORT_NAME)
else:
return os.path.join(os.path.expanduser('~'),".%s"%PROG_SHORT_NAME)
def get_avoplot_sys_dir():
"""
Returns the path used by AvoPlot to store user independent
files
"""
return __path__[0]
def get_avoplot_icons_dir():
"""
Returns the full path to the directory where the AvoPlot icons
are stored.
"""
return os.path.join(get_avoplot_sys_dir(),'icons')
def get_license_file():
"""
Returns the full path to the COPYING file installed with AvoPlot
"""
return os.path.join(__path__[0],'COPYING')
def call_on_idle(func, *args, **kwargs):
"""
Registers a callable to be executed when the event loop is empty. The
callable will only be called once. This is used to execute the _destroy()
method of AvoPlotElementBase.
"""
call_on_idle.idle_q.append((func, args, kwargs))
call_on_idle.idle_q = collections.deque()
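# Usage sketch (illustrative): call_on_idle(element._destroy) simply appends
# (func, args, kwargs) to call_on_idle.idle_q; the GUI event loop is then
# expected to pop and execute the queued entries when it becomes idle.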
#make sure that all the directories that we are expecting to exist actually do.
try:
os.makedirs(get_avoplot_rw_dir())
except OSError:
#dir already exists
pass
| gpl-3.0 |
FinHackChamp/FinHack | student_model.py | 1 | 17205 | # The code is rewritten based on source code from tensorflow tutorial for Recurrent Neural Network.
# https://www.tensorflow.org/versions/0.6.0/tutorials/recurrent/index.html
# You can get source code for the tutorial from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/ptb_word_lm.py
#
# There is dropout on each hidden layer to prevent the model from overfitting
#
# Here is a useful practical guide for training dropout networks
# https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf
# You can find the practical guide in Appendix A
import numpy as np
import tensorflow as tf
import time
import csv
from random import shuffle
import random
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn import metrics
from math import sqrt
# flags
tf.flags.DEFINE_float("epsilon", 0.1, "Epsilon value for Adam Optimizer.")
tf.flags.DEFINE_float("l2_lambda", 0.3, "Lambda for l2 loss.")
tf.flags.DEFINE_float("learning_rate", 0.1, "Learning rate")
tf.flags.DEFINE_float("max_grad_norm", 20.0, "Clip gradients to this norm.")
tf.flags.DEFINE_float("keep_prob", 0.6, "Keep probability for dropout")
tf.flags.DEFINE_integer("hidden_layer_num", 1, "The number of hidden layers (Integer)")
tf.flags.DEFINE_integer("hidden_size", 200, "The number of hidden nodes (Integer)")
tf.flags.DEFINE_integer("evaluation_interval", 5, "Evaluate and print results every x epochs")
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("epochs", 30, "Number of epochs to train for.")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_string("train_data_path", 'data/0910_b_train.csv', "Path to the training dataset")
tf.flags.DEFINE_string("test_data_path", 'data/0910_b_test.csv', "Path to the testing dataset")
currentOutputMatrix = []
cumulativeOutputMatrix = []
log_file_path = '2layeredb.txt'
hidden_state_path = 'hidden_stateb2.npy'
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
def add_gradient_noise(t, stddev=1e-3, name=None):
"""
Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].
The input Tensor `t` should be a gradient.
The output will be `t` + gaussian noise.
0.001 was said to be a good fixed value for memory networks [2].
"""
with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
t = tf.convert_to_tensor(t, name="t")
gn = tf.random_normal(tf.shape(t), stddev=stddev)
return tf.add(t, gn, name=name)
class StudentModel(object):
def __init__(self, is_training, config):
self.state_size = config.state_size
self._batch_size = batch_size = FLAGS.batch_size
self.num_skills = num_skills = config.num_skills
self.hidden_layer_num = len(self.state_size)
self.hidden_size = size = FLAGS.hidden_size
self.num_steps = num_steps = config.num_steps
input_size = num_skills*2
inputs = self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._target_id = target_id = tf.placeholder(tf.int32, [None])
self._target_correctness = target_correctness = tf.placeholder(tf.float32, [None])
final_hidden_size = self.state_size[-1]
hidden_layers = []
for i in range(self.hidden_layer_num):
hidden1 = tf.contrib.rnn.BasicLSTMCell(self.state_size[i], state_is_tuple=True,reuse=tf.get_variable_scope().reuse)
if is_training and config.keep_prob < 1:
hidden1 = tf.contrib.rnn.DropoutWrapper(hidden1, output_keep_prob=FLAGS.keep_prob)
hidden_layers.append(hidden1)
cell = tf.contrib.rnn.MultiRNNCell(hidden_layers, state_is_tuple=True)
#input_data: [batch_size*num_steps]
input_data = tf.reshape(self._input_data, [-1])
#one-hot encoding
with tf.device("/gpu:0"):
#labels: [batch_size* num_steps, 1]
labels = tf.expand_dims(input_data, 1)
#indices: [batch_size*num_steps, 1]
indices = tf.expand_dims(tf.range(0, batch_size*num_steps, 1), 1)
#concated: [batch_size * num_steps, 2]
      concated = tf.concat([indices, labels], 1)
# If sparse_indices is an n by d matrix, then for each i in [0, n)
# dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
# input_size: 2* num_skills
# inputs: [batch_size* num_steps * input_size]
inputs = tf.sparse_to_dense(concated, tf.stack([batch_size*num_steps, input_size]), 1.0, 0.0)
inputs.set_shape([batch_size*num_steps, input_size])
# [batch_size, num_steps, input_size]
inputs = tf.reshape(inputs, [-1, num_steps, input_size])
x = inputs
# x = tf.transpose(inputs, [1, 0, 2])
# # Reshape to (n_steps*batch_size, n_input)
# x = tf.reshape(x, [-1, input_size])
# # Split to get a list of 'n_steps'
# # tensors of shape (doc_num, n_input)
# x = tf.split(0, num_steps, x)
#inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, num_steps, inputs)]
#outputs, state = tf.nn.rnn(hidden1, x, dtype=tf.float32)
#outputs: [batch_size, num_steps, final_hidden_size]
outputs, state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
# print state
#output = [batch_size * num_steps, final_hidden_size]
output = tf.reshape(tf.concat(outputs,1), [-1, final_hidden_size])
# print "shape"
# print outputs.shape
# print output.shape
# calculate the logits from last hidden layer to output layer
sigmoid_w = tf.get_variable("sigmoid_w", [final_hidden_size, num_skills])
sigmoid_b = tf.get_variable("sigmoid_b", [num_skills])
#logits [batch_size * num_steps, num_skills]
logits = tf.matmul(output, sigmoid_w) + sigmoid_b
# from output nodes to pick up the right one we want
#logits: [batch_size * num_steps * num_skills]
# logits are the output of the rnn after sigmoid
logits = tf.reshape(logits, [-1])
#target_id = batch_num*m.num_steps*m.num_skills + skill_num*m.num_skills + int(problem_ids[j+1]))
#selected_logits: shape of target_id
selected_logits = tf.gather(logits, self.target_id)
#make prediction
self._pred = self._pred_values = pred_values = tf.sigmoid(selected_logits)
# loss function
    loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=selected_logits, labels=target_correctness))
#self._cost = cost = tf.reduce_mean(loss)
self._final_state = state
self._cost = cost = loss
@property
def batch_size(self):
return self._batch_size
@property
def input_data(self):
return self._input_data
@property
def auc(self):
return self._auc
@property
def pred(self):
return self._pred
@property
def target_id(self):
return self._target_id
@property
def target_correctness(self):
return self._target_correctness
@property
def initial_state(self):
return self._initial_state
@property
def pred_values(self):
return self._pred_values
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
class HyperParamsConfig(object):
"""Small config."""
init_scale = 0.05
num_steps = 0
max_grad_norm = FLAGS.max_grad_norm
max_max_epoch = FLAGS.epochs
keep_prob = FLAGS.keep_prob
num_skills = 0
state_size = [200,200]
def run_epoch(session, m, students, eval_op, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
global cumulativeOutputMatrix
global currentOutputMatrix
index = 0
pred_labels = []
actual_labels = []
while(index+m.batch_size < len(students)):
x = np.zeros((m.batch_size, m.num_steps))
target_id = []
target_correctness = []
count = 0
for i in range(m.batch_size):
student = students[index+i]
problem_ids = student[1]
correctness = student[2]
for j in range(len(problem_ids)-1):
problem_id = int(problem_ids[j])
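                # Input encoding: an incorrect attempt at skill k maps to index k,
                # a correct one to k + num_skills; the model expands this into a
                # one-hot vector of width 2 * num_skills.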
label_index = 0
if(int(correctness[j]) == 0):
label_index = problem_id
else:
label_index = problem_id + m.num_skills
x[i, j] = label_index
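                # The model flattens its logits to [batch * steps * skills], so the
                # prediction for student i's *next* problem at step j sits at index
                # i*num_steps*num_skills + j*num_skills + next_problem_id.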
target_id.append(i*m.num_steps*m.num_skills+j*m.num_skills+int(problem_ids[j+1]))
target_correctness.append(int(correctness[j+1]))
actual_labels.append(int(correctness[j+1]))
pred, _, final_state = session.run([m.pred, eval_op, m.final_state], feed_dict={
m.input_data: x, m.target_id: target_id,
m.target_correctness: target_correctness})
#h: [batch_size, num_unit]
h = final_state[0][1]
for i in range(len(final_state)):
if i == 0: continue
h = np.concatenate((h,final_state[i][1]), axis=1)
index += m.batch_size
# if first batch of data
if len(currentOutputMatrix) < 1:
currentOutputMatrix = h
else:
currentOutputMatrix = np.concatenate((currentOutputMatrix, h), axis = 0)
# if last iteration in a epoch
if index+m.batch_size >= len(students):
# if first epoch
if len(cumulativeOutputMatrix) < 1:
cumulativeOutputMatrix = currentOutputMatrix
else:
# print cumulativeOutputMatrix.shape
# print currentOutputMatrix.shape
#ignore test case
if np.array(cumulativeOutputMatrix).shape[0] == np.array(currentOutputMatrix).shape[0]:
cumulativeOutputMatrix = np.concatenate((cumulativeOutputMatrix, currentOutputMatrix), axis= 1)
# a new epoch
if index < 1:
currentOutputMatrix = []
for p in pred:
pred_labels.append(p)
# print "final_state"
#final_state: (tuple(LSTMStateTuple([batch_size, hidden_size])))
# print len(final_state)
# print len(final_state[0])
# print len(currentOutputMatrix)
# print len(cumulativeOutputMatrix)
# print np.array(cumulativeOutputMatrix).shape
# print np.array(currentOutputMatrix).shape
# reset current when finish a epoch
currentOutputMatrix = []
# print final_state[0][0].shape
rmse = sqrt(mean_squared_error(actual_labels, pred_labels))
fpr, tpr, thresholds = metrics.roc_curve(actual_labels, pred_labels, pos_label=1)
auc = metrics.auc(fpr, tpr)
#calculate r^2
r2 = r2_score(actual_labels, pred_labels)
return rmse, auc, r2, final_state
def read_data_from_csv_file(fileName):
config = HyperParamsConfig()
inputs = []
targets = []
rows = []
max_skill_num = 0
max_num_problems = 0
problems = []
with open(fileName, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
rows.append(row)
index = 0
i = 0
print "the number of rows is " + str(len(rows))
tuple_rows = []
#turn list to tuple
while(index < len(rows)-1):
problems_num = int(rows[index][0])
secondRow = rows[index+1]
thirdRow = rows[index+2]
# if index == 0:
# print secondRow
# print thirdRow
if len(secondRow) <1:
index+=3
continue
tmp_max_skill = max(map(int, secondRow))
if(tmp_max_skill > max_skill_num):
max_skill_num = tmp_max_skill
if(problems_num <= 2):
index += 3
else:
if problems_num > max_num_problems:
max_num_problems = problems_num
tup = (rows[index], secondRow, thirdRow)
tuple_rows.append(tup)
problems.append(problems_num)
index += 3
# print problems
#shuffle the tuple
random.shuffle(tuple_rows)
print "The number of students is ", len(tuple_rows)
print "Finish reading data"
return tuple_rows, max_num_problems, max_skill_num+1
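# Assumed CSV layout parsed above (illustrative values): each student occupies
# three consecutive rows --
#   15                 <- number of problems attempted
#   3,3,7,7,9,...      <- skill/problem ids, one per attempt
#   0,1,1,0,1,...      <- correctness flags (0/1), aligned with the ids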
def main(unused_args):
global cumulativeOutputMatrix
config = HyperParamsConfig()
eval_config = HyperParamsConfig()
timestamp = str(time.time())
train_data_path = FLAGS.train_data_path
#path to your test data set
test_data_path = FLAGS.test_data_path
#the file to store your test results
result_file_path = "run_logs_{}".format(timestamp)
#your model name
model_name = "DKT"
train_students, train_max_num_problems, train_max_skill_num = read_data_from_csv_file(train_data_path)
config.num_steps = train_max_num_problems
config.num_skills = train_max_skill_num
test_students, test_max_num_problems, test_max_skill_num = read_data_from_csv_file(test_data_path)
eval_config.num_steps = test_max_num_problems
eval_config.num_skills = test_max_skill_num
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
global_step = tf.Variable(0, name="global_step", trainable=False)
# decay learning rate
starter_learning_rate = FLAGS.learning_rate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 3000, 0.96, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=FLAGS.epsilon)
with tf.Session(config=session_conf) as session:
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
# training model
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = StudentModel(is_training=True, config=config)
# testing model
with tf.variable_scope("model", reuse=True, initializer=initializer):
mtest = StudentModel(is_training=False, config=eval_config)
grads_and_vars = optimizer.compute_gradients(m.cost)
grads_and_vars = [(tf.clip_by_norm(g, FLAGS.max_grad_norm), v)
for g, v in grads_and_vars if g is not None]
grads_and_vars = [(add_gradient_noise(g), v) for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(grads_and_vars, name="train_op", global_step=global_step)
session.run(tf.global_variables_initializer())
# log hyperparameters to results file
with open(result_file_path, "a+") as f:
print("Writing hyperparameters into file")
f.write("Hidden layer size: %d \n" % (FLAGS.hidden_size))
f.write("Dropout rate: %.3f \n" % (FLAGS.keep_prob))
f.write("Batch size: %d \n" % (FLAGS.batch_size))
f.write("Max grad norm: %d \n" % (FLAGS.max_grad_norm))
# saver = tf.train.Saver(tf.all_variables())
cs = []
hs = []
for i in range(config.max_max_epoch):
rmse, auc, r2, final_state = run_epoch(session, m, train_students, train_op, verbose=True)
print("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (i + 1, rmse, auc, r2))
with open(log_file_path, "a+") as f:
f.write("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (i + 1, rmse, auc, r2))
if((i+1) % FLAGS.evaluation_interval == 0):
print "Save variables to disk"
# save_path = saver.save(session, model_name)#
print("*"*10)
print("Start to test model....")
rmse, auc, r2, _ = run_epoch(session, mtest, test_students, tf.no_op())
print("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % (i+1, rmse, auc, r2))
with open(log_file_path, "a+") as f:
f.write("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % ((i+1) , rmse, auc, r2))
f.write("\n")
print("*"*10)
# c, h = final_state
# if len(cs) < 1:
# cs = c
# else
cumulativeOutputMatrix = np.array(cumulativeOutputMatrix)
print cumulativeOutputMatrix.shape
np.save(hidden_state_path, cumulativeOutputMatrix)
# np.savetxt(hidden_state_path, cumulativeOutputMatrix, delimiter=',',)
if __name__ == "__main__":
tf.app.run() | mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/widgets/ipython.py | 2 | 29448 | # -*- coding:utf-8 -*-
#
# Copyright © 2011-2012 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
IPython v0.13+ client's widget
"""
# Fix for Issue 1356
from __future__ import absolute_import
# Stdlib imports
import os
import os.path as osp
from string import Template
import sys
import time
# Qt imports
from spyderlib.qt.QtGui import (QTextEdit, QKeySequence, QWidget, QMenu,
QHBoxLayout, QToolButton, QVBoxLayout,
QMessageBox)
from spyderlib.qt.QtCore import SIGNAL, Qt
from spyderlib import pygments_patch
pygments_patch.apply()
# IPython imports
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget
from IPython.qt.console.ansi_code_processor import ANSI_OR_SPECIAL_PATTERN
from IPython.core.application import get_ipython_dir
from IPython.core.oinspect import call_tip
from IPython.config.loader import Config, load_pyconfig_files
# Local imports
from spyderlib.baseconfig import (get_conf_path, get_image_path,
get_module_source_path, _)
from spyderlib.config import CONF
from spyderlib.guiconfig import (create_shortcut, get_font, get_shortcut,
new_shortcut)
from spyderlib.utils.dochelpers import getargspecfromtext, getsignaturefromtext
from spyderlib.utils.qthelpers import (get_std_icon, create_toolbutton,
add_actions, create_action, get_icon,
restore_keyevent)
from spyderlib.utils import programs, sourcecode
from spyderlib.widgets.browser import WebView
from spyderlib.widgets.calltip import CallTipWidget
from spyderlib.widgets.mixins import (BaseEditMixin, InspectObjectMixin,
SaveHistoryMixin, TracebackLinksMixin)
#-----------------------------------------------------------------------------
# Templates
#-----------------------------------------------------------------------------
# Using the same css file from the Object Inspector for now. Maybe
# later it'll be a good idea to create a new one.
UTILS_PATH = get_module_source_path('spyderlib', 'utils')
CSS_PATH = osp.join(UTILS_PATH, 'inspector', 'static', 'css')
TEMPLATES_PATH = osp.join(UTILS_PATH, 'ipython', 'templates')
BLANK = open(osp.join(TEMPLATES_PATH, 'blank.html')).read()
LOADING = open(osp.join(TEMPLATES_PATH, 'loading.html')).read()
KERNEL_ERROR = open(osp.join(TEMPLATES_PATH, 'kernel_error.html')).read()
#-----------------------------------------------------------------------------
# Control widgets
#-----------------------------------------------------------------------------
class IPythonControlWidget(TracebackLinksMixin, InspectObjectMixin, QTextEdit,
BaseEditMixin):
"""
Subclass of QTextEdit with features from Spyder's mixins to use as the
control widget for IPython widgets
"""
QT_CLASS = QTextEdit
def __init__(self, parent=None):
QTextEdit.__init__(self, parent)
BaseEditMixin.__init__(self)
TracebackLinksMixin.__init__(self)
InspectObjectMixin.__init__(self)
self.found_results = []
self.calltip_widget = CallTipWidget(self, hide_timer_on=True)
# To not use Spyder calltips obtained through the monitor
self.calltips = False
def showEvent(self, event):
"""Reimplement Qt Method"""
self.emit(SIGNAL("visibility_changed(bool)"), True)
def _key_question(self, text):
""" Action for '?' and '(' """
parent = self.parentWidget()
self.current_prompt_pos = parent._prompt_pos
if self.get_current_line_to_cursor():
last_obj = self.get_last_obj()
if last_obj and not last_obj.isdigit():
self.show_object_info(last_obj)
self.insert_text(text)
def keyPressEvent(self, event):
"""Reimplement Qt Method - Basic keypress event handler"""
event, text, key, ctrl, shift = restore_keyevent(event)
if key == Qt.Key_Question and not self.has_selected_text():
self._key_question(text)
elif key == Qt.Key_ParenLeft and not self.has_selected_text():
self._key_question(text)
else:
# Let the parent widget handle the key press event
QTextEdit.keyPressEvent(self, event)
def focusInEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.emit(SIGNAL('focus_changed()'))
return super(IPythonControlWidget, self).focusInEvent(event)
def focusOutEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.emit(SIGNAL('focus_changed()'))
return super(IPythonControlWidget, self).focusOutEvent(event)
class IPythonPageControlWidget(QTextEdit, BaseEditMixin):
"""
Subclass of QTextEdit with features from Spyder's mixins.BaseEditMixin to
use as the paging widget for IPython widgets
"""
QT_CLASS = QTextEdit
def __init__(self, parent=None):
QTextEdit.__init__(self, parent)
BaseEditMixin.__init__(self)
self.found_results = []
def showEvent(self, event):
"""Reimplement Qt Method"""
self.emit(SIGNAL("visibility_changed(bool)"), True)
def keyPressEvent(self, event):
"""Reimplement Qt Method - Basic keypress event handler"""
event, text, key, ctrl, shift = restore_keyevent(event)
if key == Qt.Key_Slash and self.isVisible():
self.emit(SIGNAL("show_find_widget()"))
def focusInEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.emit(SIGNAL('focus_changed()'))
return super(IPythonPageControlWidget, self).focusInEvent(event)
def focusOutEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.emit(SIGNAL('focus_changed()'))
return super(IPythonPageControlWidget, self).focusOutEvent(event)
#-----------------------------------------------------------------------------
# Shell widget
#-----------------------------------------------------------------------------
class IPythonShellWidget(RichIPythonWidget):
"""
Spyder's IPython shell widget
This class has custom control and page_control widgets, additional methods
to provide missing functionality and a couple more keyboard shortcuts.
"""
def __init__(self, *args, **kw):
# To override the Qt widget used by RichIPythonWidget
self.custom_control = IPythonControlWidget
self.custom_page_control = IPythonPageControlWidget
super(IPythonShellWidget, self).__init__(*args, **kw)
self.set_background_color()
# --- Spyder variables ---
self.ipyclient = None
# --- Keyboard shortcuts ---
self.shortcuts = self.create_shortcuts()
# --- IPython variables ---
# To send an interrupt signal to the Spyder kernel
self.custom_interrupt = True
# To restart the Spyder kernel in case it dies
self.custom_restart = True
#---- Public API ----------------------------------------------------------
def set_ipyclient(self, ipyclient):
"""Bind this shell widget to an IPython client one"""
self.ipyclient = ipyclient
self.exit_requested.connect(ipyclient.exit_callback)
def long_banner(self):
"""Banner for IPython widgets with pylab message"""
from IPython.core.usage import default_gui_banner
banner = default_gui_banner
pylab_o = CONF.get('ipython_console', 'pylab', True)
autoload_pylab_o = CONF.get('ipython_console', 'pylab/autoload', True)
mpl_installed = programs.is_module_installed('matplotlib')
if mpl_installed and (pylab_o and autoload_pylab_o):
pylab_message = ("\nPopulating the interactive namespace from "
"numpy and matplotlib")
banner = banner + pylab_message
sympy_o = CONF.get('ipython_console', 'symbolic_math', True)
if sympy_o:
lines = """
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
"""
banner = banner + lines
return banner
def short_banner(self):
"""Short banner with Python and IPython versions"""
from IPython.core.release import version
py_ver = '%d.%d.%d' % (sys.version_info[0], sys.version_info[1],
sys.version_info[2])
banner = 'Python %s on %s -- IPython %s' % (py_ver, sys.platform,
version)
return banner
def clear_console(self):
self.execute("%clear")
def write_to_stdin(self, line):
"""Send raw characters to the IPython kernel through stdin"""
try:
self.kernel_client.stdin_channel.input(line)
except AttributeError:
self.kernel_client.input(line)
def set_background_color(self):
lightbg_o = CONF.get('ipython_console', 'light_color')
if not lightbg_o:
self.set_default_style(colors='linux')
def create_shortcuts(self):
inspect = create_shortcut(self._control.inspect_current_object,
context='Console', name='Inspect current object',
parent=self)
clear_console = create_shortcut(self.clear_console, context='Console',
name='Clear shell', parent=self)
# Fixed shortcuts
new_shortcut("Ctrl+T", self,
lambda: self.emit(SIGNAL("new_ipyclient()")))
return [inspect, clear_console]
def get_signature(self, content):
"""Get signature from inspect reply content"""
data = content.get('data', {})
text = data.get('text/plain', '')
if text:
text = ANSI_OR_SPECIAL_PATTERN.sub('', text)
line = self._control.get_current_line_to_cursor()
name = line[:-1].split('.')[-1]
argspec = getargspecfromtext(text)
if argspec:
# This covers cases like np.abs, whose docstring is
# the same as np.absolute and because of that a proper
# signature can't be obtained correctly
signature = name + argspec
else:
signature = getsignaturefromtext(text, name)
return signature
else:
return ''
#---- IPython private methods ---------------------------------------------
def _context_menu_make(self, pos):
"""Reimplement the IPython context menu"""
menu = super(IPythonShellWidget, self)._context_menu_make(pos)
return self.ipyclient.add_actions_to_context_menu(menu)
def _banner_default(self):
"""
Reimplement banner creation to let the user decide if he wants a
banner or not
"""
banner_o = CONF.get('ipython_console', 'show_banner', True)
if banner_o:
return self.long_banner()
else:
return self.short_banner()
def _handle_object_info_reply(self, rep):
"""
Reimplement call tips to only show signatures, using the same style
from our Editor and External Console too
Note: For IPython 2-
"""
self.log.debug("oinfo: %s", rep.get('content', ''))
cursor = self._get_cursor()
info = self._request_info.get('call_tip')
if info and info.id == rep['parent_header']['msg_id'] and \
info.pos == cursor.position():
content = rep['content']
if content.get('ismagic', False):
call_info, doc = None, None
else:
call_info, doc = call_tip(content, format_call=True)
if call_info is None and doc is not None:
name = content['name'].split('.')[-1]
argspec = getargspecfromtext(doc)
if argspec:
# This covers cases like np.abs, whose docstring is
# the same as np.absolute and because of that a proper
# signature can't be obtained correctly
call_info = name + argspec
else:
call_info = getsignaturefromtext(doc, name)
if call_info:
self._control.show_calltip(_("Arguments"), call_info,
signature=True, color='#2D62FF')
def _handle_inspect_reply(self, rep):
"""
Reimplement call tips to only show signatures, using the same style
from our Editor and External Console too
Note: For IPython 3+
"""
cursor = self._get_cursor()
info = self._request_info.get('call_tip')
if info and info.id == rep['parent_header']['msg_id'] and \
info.pos == cursor.position():
content = rep['content']
if content.get('status') == 'ok' and content.get('found', False):
signature = self.get_signature(content)
if signature:
self._control.show_calltip(_("Arguments"), signature,
signature=True, color='#2D62FF')
#---- Qt methods ----------------------------------------------------------
def focusInEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.emit(SIGNAL('focus_changed()'))
return super(IPythonShellWidget, self).focusInEvent(event)
def focusOutEvent(self, event):
"""Reimplement Qt method to send focus change notification"""
self.emit(SIGNAL('focus_changed()'))
return super(IPythonShellWidget, self).focusOutEvent(event)
#-----------------------------------------------------------------------------
# Client widget
#-----------------------------------------------------------------------------
class IPythonClient(QWidget, SaveHistoryMixin):
"""
IPython client or frontend for Spyder
This is a widget composed of a shell widget (i.e. RichIPythonWidget
+ our additions = IPythonShellWidget) and an WebView info widget to
print kernel error and other messages.
"""
SEPARATOR = '%s##---(%s)---' % (os.linesep*2, time.ctime())
def __init__(self, plugin, name, history_filename, connection_file=None,
hostname=None, sshkey=None, password=None,
kernel_widget_id=None, menu_actions=None):
super(IPythonClient, self).__init__(plugin)
SaveHistoryMixin.__init__(self)
self.options_button = None
# stop button and icon
self.stop_button = None
self.stop_icon = get_icon("stop.png")
self.connection_file = connection_file
self.kernel_widget_id = kernel_widget_id
self.hostname = hostname
self.sshkey = sshkey
self.password = password
self.name = name
self.get_option = plugin.get_option
self.shellwidget = IPythonShellWidget(config=self.shellwidget_config(),
local_kernel=False)
self.shellwidget.hide()
self.infowidget = WebView(self)
self.menu_actions = menu_actions
self.history_filename = get_conf_path(history_filename)
self.history = []
self.namespacebrowser = None
self.set_infowidget_font()
self.loading_page = self._create_loading_page()
self.infowidget.setHtml(self.loading_page)
vlayout = QVBoxLayout()
toolbar_buttons = self.get_toolbar_buttons()
hlayout = QHBoxLayout()
for button in toolbar_buttons:
hlayout.addWidget(button)
vlayout.addLayout(hlayout)
vlayout.setContentsMargins(0, 0, 0, 0)
vlayout.addWidget(self.shellwidget)
vlayout.addWidget(self.infowidget)
self.setLayout(vlayout)
self.exit_callback = lambda: plugin.close_client(client=self)
#------ Public API --------------------------------------------------------
def show_shellwidget(self, give_focus=True):
"""Show shellwidget and configure it"""
self.infowidget.hide()
self.shellwidget.show()
self.infowidget.setHtml(BLANK)
if give_focus:
self.get_control().setFocus()
# Connect shellwidget to the client
self.shellwidget.set_ipyclient(self)
# To save history
self.shellwidget.executing.connect(self.add_to_history)
# For Mayavi to run correctly
self.shellwidget.executing.connect(self.set_backend_for_mayavi)
# To update history after execution
self.shellwidget.executed.connect(self.update_history)
# To update the Variable Explorer after execution
self.shellwidget.executed.connect(self.auto_refresh_namespacebrowser)
# To show a stop button, when executing a process
self.shellwidget.executing.connect(self.enable_stop_button)
# To hide a stop button after execution stopped
self.shellwidget.executed.connect(self.disable_stop_button)
def enable_stop_button(self):
self.stop_button.setEnabled(True)
def disable_stop_button(self):
self.stop_button.setDisabled(True)
def stop_button_click_handler(self):
self.stop_button.setDisabled(True)
self.interrupt_kernel()
def show_kernel_error(self, error):
"""Show kernel initialization errors in infowidget"""
# Remove explanation about how to kill the kernel (doesn't apply to us)
error = error.split('issues/2049')[-1]
# Remove unneeded blank lines at the beginning
eol = sourcecode.get_eol_chars(error)
if eol:
error = error.replace(eol, '<br>')
while error.startswith('<br>'):
error = error[4:]
# Remove connection message
if error.startswith('To connect another client') or \
error.startswith('[IPKernelApp] To connect another client'):
error = error.split('<br>')
error = '<br>'.join(error[2:])
# Don't break lines in hyphens
# From http://stackoverflow.com/q/7691569/438386
error = error.replace('-', '‑')
        message = _("An error occurred while starting the kernel")
kernel_error_template = Template(KERNEL_ERROR)
page = kernel_error_template.substitute(css_path=CSS_PATH,
message=message,
error=error)
self.infowidget.setHtml(page)
def show_restart_animation(self):
self.shellwidget.hide()
self.infowidget.setHtml(self.loading_page)
self.infowidget.show()
def get_name(self):
"""Return client name"""
return ((_("Console") if self.hostname is None else self.hostname)
+ " " + self.name)
def get_control(self):
"""Return the text widget (or similar) to give focus to"""
# page_control is the widget used for paging
page_control = self.shellwidget._page_control
if page_control and page_control.isVisible():
return page_control
else:
return self.shellwidget._control
def get_options_menu(self):
"""Return options menu"""
restart_action = create_action(self, _("Restart kernel"),
shortcut=QKeySequence("Ctrl+."),
icon=get_icon('restart.png'),
triggered=self.restart_kernel)
restart_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
# Main menu
if self.menu_actions is not None:
actions = [restart_action, None] + self.menu_actions
else:
actions = [restart_action]
return actions
def get_toolbar_buttons(self):
"""Return toolbar buttons list"""
#TODO: Eventually add some buttons (Empty for now)
# (see for example: spyderlib/widgets/externalshell/baseshell.py)
buttons = []
# Code to add the stop button
if self.stop_button is None:
self.stop_button = create_toolbutton(self, text=_("Stop"),
icon=self.stop_icon,
tip=_("Stop the current command"))
self.disable_stop_button()
# set click event handler
self.stop_button.clicked.connect(self.stop_button_click_handler)
if self.stop_button is not None:
buttons.append(self.stop_button)
if self.options_button is None:
options = self.get_options_menu()
if options:
self.options_button = create_toolbutton(self,
text=_("Options"), icon=get_icon('tooloptions.png'))
self.options_button.setPopupMode(QToolButton.InstantPopup)
menu = QMenu(self)
add_actions(menu, options)
self.options_button.setMenu(menu)
if self.options_button is not None:
buttons.append(self.options_button)
return buttons
def add_actions_to_context_menu(self, menu):
"""Add actions to IPython widget context menu"""
# See spyderlib/widgets/ipython.py for more details on this method
inspect_action = create_action(self, _("Inspect current object"),
QKeySequence(get_shortcut('console',
'inspect current object')),
icon=get_std_icon('MessageBoxInformation'),
triggered=self.inspect_object)
clear_line_action = create_action(self, _("Clear line or block"),
QKeySequence("Shift+Escape"),
icon=get_icon('eraser.png'),
triggered=self.clear_line)
clear_console_action = create_action(self, _("Clear console"),
QKeySequence(get_shortcut('console',
'clear shell')),
icon=get_icon('clear.png'),
triggered=self.clear_console)
quit_action = create_action(self, _("&Quit"), icon='exit.png',
triggered=self.exit_callback)
add_actions(menu, (None, inspect_action, clear_line_action,
clear_console_action, None, quit_action))
return menu
def set_font(self, font):
"""Set IPython widget's font"""
self.shellwidget._control.setFont(font)
self.shellwidget.font = font
def set_infowidget_font(self):
font = get_font('inspector', 'rich_text')
self.infowidget.set_font(font)
def interrupt_kernel(self):
"""Interrupt the associanted Spyder kernel if it's running"""
self.shellwidget.request_interrupt_kernel()
def restart_kernel(self):
"""Restart the associanted Spyder kernel"""
self.shellwidget.request_restart_kernel()
def inspect_object(self):
"""Show how to inspect an object with our object inspector"""
self.shellwidget._control.inspect_current_object()
def clear_line(self):
"""Clear a console line"""
self.shellwidget._keyboard_quit()
def clear_console(self):
"""Clear the whole console"""
self.shellwidget.execute("%clear")
def if_kernel_dies(self, t):
"""
Show a message in the console if the kernel dies.
t is the time in seconds between the death and showing the message.
"""
message = _("It seems the kernel died unexpectedly. Use "
"'Restart kernel' to continue using this console.")
self.shellwidget._append_plain_text(message + '\n')
def update_history(self):
self.history = self.shellwidget._history
def set_backend_for_mayavi(self, command):
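        """Switch the graphical backend to Qt when the executed code imports Mayavi."""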
calling_mayavi = False
lines = command.splitlines()
for l in lines:
if not l.startswith('#'):
if 'import mayavi' in l or 'from mayavi' in l:
calling_mayavi = True
break
if calling_mayavi:
message = _("Changing backend to Qt for Mayavi")
self.shellwidget._append_plain_text(message + '\n')
self.shellwidget.execute("%gui inline\n%gui qt")
def interrupt_message(self):
"""
Print an interrupt message when the client is connected to an external
kernel
"""
message = _("Kernel process is either remote or unspecified. "
"Cannot interrupt")
QMessageBox.information(self, "IPython", message)
def restart_message(self):
"""
Print a restart message when the client is connected to an external
kernel
"""
message = _("Kernel process is either remote or unspecified. "
"Cannot restart.")
QMessageBox.information(self, "IPython", message)
def set_namespacebrowser(self, namespacebrowser):
"""Set namespace browser widget"""
self.namespacebrowser = namespacebrowser
def auto_refresh_namespacebrowser(self):
"""Refresh namespace browser"""
if self.namespacebrowser:
self.namespacebrowser.refresh_table()
def shellwidget_config(self):
"""Generate a Config instance for shell widgets using our config
system
This lets us create each widget with its own config (as opposed to
IPythonQtConsoleApp, where all widgets have the same config)
"""
# ---- IPython config ----
try:
profile_path = osp.join(get_ipython_dir(), 'profile_default')
full_ip_cfg = load_pyconfig_files(['ipython_qtconsole_config.py'],
profile_path)
# From the full config we only select the IPythonWidget section
# because the others have no effect here.
ip_cfg = Config({'IPythonWidget': full_ip_cfg.IPythonWidget})
except:
ip_cfg = Config()
# ---- Spyder config ----
spy_cfg = Config()
# Make the pager widget a rich one (i.e a QTextEdit)
spy_cfg.IPythonWidget.kind = 'rich'
# Gui completion widget
gui_comp_o = self.get_option('use_gui_completion')
completions = {True: 'droplist', False: 'ncurses'}
spy_cfg.IPythonWidget.gui_completion = completions[gui_comp_o]
# Pager
pager_o = self.get_option('use_pager')
if pager_o:
spy_cfg.IPythonWidget.paging = 'inside'
else:
spy_cfg.IPythonWidget.paging = 'none'
# Calltips
calltips_o = self.get_option('show_calltips')
spy_cfg.IPythonWidget.enable_calltips = calltips_o
# Buffer size
buffer_size_o = self.get_option('buffer_size')
spy_cfg.IPythonWidget.buffer_size = buffer_size_o
# Prompts
in_prompt_o = self.get_option('in_prompt')
out_prompt_o = self.get_option('out_prompt')
if in_prompt_o:
spy_cfg.IPythonWidget.in_prompt = in_prompt_o
if out_prompt_o:
spy_cfg.IPythonWidget.out_prompt = out_prompt_o
# Merge IPython and Spyder configs. Spyder prefs will have prevalence
# over IPython ones
ip_cfg._merge(spy_cfg)
return ip_cfg
#------ Private API -------------------------------------------------------
def _create_loading_page(self):
loading_template = Template(LOADING)
loading_img = get_image_path('loading_sprites.png')
if os.name == 'nt':
loading_img = loading_img.replace('\\', '/')
message = _("Connecting to kernel...")
page = loading_template.substitute(css_path=CSS_PATH,
loading_img=loading_img,
message=message)
return page
#---- Qt methods ----------------------------------------------------------
def closeEvent(self, event):
"""
Reimplement Qt method to stop sending the custom_restart_kernel_died
signal
"""
kc = self.shellwidget.kernel_client
if kc is not None:
kc.hb_channel.pause()
| gpl-3.0 |
jguhlin/nn-replicon-identification | replicon_identification_cnn4.py | 1 | 22218 | # Next step is to add filename processed to text summary
import tensorflow as tf
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from collections import Counter
import collections
import random
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import Bio
from Bio import SeqIO
import os
import concurrent.futures
import functools
from functools import partial
import math
import threading
import time
import random
from random import shuffle
import pickle
import ntpath
import os.path
import sys
from tensorflow.python.summary import summary
from collections import deque
import itertools
# k-mer size to use
k = 9
#
# NOTE!!!!!!!!!!!!!!!!
#
# We can reduce problem space if we get the reverse complement, and add a bit to indicate reversed or not...
# Not really.... revcomp just doubles it back up again....
#
# Also -- Build a recurrent network to predict sequences that come after a given kmer?
# Look at word2vec, dna2vec, bag of words, skip-gram
#
# Problem space
space = 5 ** k
def partition(n, step, coll):
for i in range(0, len(coll), step):
if (i+n > len(coll)):
break # raise StopIteration...
yield coll[i:i+n]
def get_kmers(k):
return lambda sequence: partition(k, k, sequence)
def convert_nt(c):
return {"N": 0, "A": 1, "C": 2, "T": 3, "G": 4}.get(c, 0)
def convert_nt_complement(c):
return {"N": 0, "A": 3, "C": 4, "T": 1, "G": 2}.get(c, 0)
def convert_kmer_to_int(kmer):
return int(''.join(str(x) for x in (map(convert_nt, kmer))), 5)
def convert_kmer_to_int_complement(kmer):
return int(''.join(str(x) for x in reversed(list(map(convert_nt_complement, kmer)))), 5)
def convert_base5(n):
return {"0": "N", "1": "A", "2": "C", "3": "T", "4": "G"}.get(n,"N")
def convert_to_kmer(kmer):
return ''.join(map(convert_base5, str(np.base_repr(kmer, 5))))
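# Worked example of the base-5 encoding above (with k = 3): "ACT" maps to the
# digits 1, 2, 3, so convert_kmer_to_int("ACT") == int("123", 5) == 38 and
# convert_to_kmer(38) recovers "ACT". Leading "N"s (digit 0) are lost on the way
# back because base-5 numbers drop leading zeros.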
# Not using sparse tensors anymore.
tf.logging.set_verbosity(tf.logging.INFO)
# Get all kmers, in order, with a sliding window of k (but sliding 1bp for each iteration up to k)
# Also get RC for all....
def kmer_processor(seq,offset):
return list(map(convert_kmer_to_int, get_kmers(k)(seq[offset:])))
def get_kmers_from_seq(sequence):
kmers_from_seq = list()
kp = functools.partial(kmer_processor, sequence)
for i in map(kp, range(0,k)):
kmers_from_seq.append(i)
rev = sequence[::-1]
kpr = functools.partial(kmer_processor, rev)
for i in map(kpr, range(0,k)):
kmers_from_seq.append(i)
# for i in range(0,k):
# kmers_from_seq.append(kmer_processor(sequence,i))
# for i in range(0,k):
# kmers_from_seq.append(kmer_processor(rev, i))
return kmers_from_seq
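# Note: get_kmers_from_seq returns 2*k lists of kmer integers -- the k reading
# frames of the forward sequence plus k frames of the simply reversed string
# (not the reverse complement; convert_kmer_to_int_complement handles that case).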
data = list()
def load_fasta(filename):
# tf.summary.text("File", tf.as_string(filename))
data = dict()
file_base_name = ntpath.basename(filename)
picklefilename = file_base_name + ".picklepickle"
if os.path.isfile(picklefilename):
print("Loading from pickle: " + filename)
data = pickle.load(open(picklefilename, "rb"))
else:
print("File not found, generating new sequence: " + picklefilename)
for seq_record in SeqIO.parse(filename, "fasta"):
data.update({seq_record.id:
get_kmers_from_seq(seq_record.seq.upper())})
pickle.dump(data, open(picklefilename, "wb"))
sys.stdout.flush()
return(data)
def get_kmers_from_file(filename):
kmer_list = list()
for seq_record in SeqIO.parse(filename, "fasta"):
kmer_list.extend(get_kmers_from_seq(seq_record.seq.upper()))
return set([item for sublist in kmer_list for item in sublist])
all_kmers = set()
# Very slow, should make this part concurrent...
def find_all_kmers(directory):
kmer_master_list = list()
files = [directory + "/" + f for f in os.listdir(directory)]
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
for i in executor.map(get_kmers_from_file, files):
kmer_master_list.extend(list(i))
kmer_master_list = list(set(kmer_master_list))
print("Total unique kmers: " + str(len(set(kmer_master_list))))
return set(kmer_master_list)
def get_categories(directory):
data = list()
files = os.listdir(directory)
for filename in files:
for seq_record in SeqIO.parse(directory + "/" + filename, "fasta"):
data.append(seq_record.id)
data = sorted(list(set(data)))
return(data)
def training_file_generator(directory):
files = [directory + "/" + f for f in os.listdir(directory)]
random.shuffle(files)
def gen():
nonlocal files
if (len(files) == 0):
files = [directory + "/" + f for f in os.listdir(directory)]
random.shuffle(files)
return(files.pop())
return gen
def gen_random_training_data(input_data, window_size):
rname = random.choice(list(input_data.keys()))
rdata = random.choice(input_data[rname])
idx = random.randrange(window_size + 1, len(rdata) - window_size - 1)
tdata = list();
for i in range(idx - window_size - 1, idx + window_size):
if (i < 0): continue
if (i >= len(rdata)): break
if type(rdata[idx]) == list: break;
if type(rdata[i]) == list: break
tdata.append(kmer_dict[rdata[i]])
return tdata, rname
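# gen_random_training_data picks a random replicon and reading frame, then returns
# a window of 2*window_size + 1 kmer_dict indices around a random position,
# together with the replicon name that serves as the label.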
# At the moment each training batch comes from a single FASTA file (usually one strain)
# This can be OK, as long as the training batch is large
# Reading of FASTA files still needs to be sped up though, maybe pyfaidx or something?
# Define the one-hot dictionary...
replicons_list = get_categories("training-files/")
oh = dict()
a = 0
for i in replicons_list:
oh[i] = tf.one_hot(a, len(replicons_list))
a += 1
oh = dict()
a = 0
for i in replicons_list:
oh[i] = a
a += 1
oh = dict()
oh['Main'] = [1.0, 0.0, 0.0]
oh['pSymA'] = [0.0, 1.0, 0.0]
oh['pSymB'] = [0.0, 0.0, 1.0]
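# Only this last, hard-coded one-hot mapping is actually in effect; the two `oh`
# dictionaries built above are overwritten by it.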
def generate_training_batch(data, batch_size, window_size):
training_batch_data = list();
while len(training_batch_data) < batch_size:
training_batch_data.append(gen_random_training_data(data,
window_size))
return training_batch_data
batch_size = 1024
embedding_size = 128
window_size = 7
replicons_list = get_categories("training-files/")
filegen = training_file_generator("training-files/")
repdict = dict()
a = 0
for i in replicons_list:
repdict[i] = a
a += 1
def test_input_fn(data):
tbatch = generate_training_batch(data, 1, window_size)
dat = {'x': tf.convert_to_tensor([tf.convert_to_tensor(get_kmer_embeddings(tbatch[0][0]))])}
lab = tf.convert_to_tensor([repdict[tbatch[0][1]]])
return dat, lab
def train_input_fn_raw(data):
tbatch = generate_training_batch(data, 1, window_size)
dat = {'x': (get_kmer_embeddings(tbatch[0][0]))}
lab = [repdict[tbatch[0][1]]]
return dat, lab
# Because this was run at work on a smaller sample of files....
# with open("all_kmers_subset.txt", "w") as f:
# for s in all_kmers:
# f.write(str(s) +"\n")
sess = tf.Session()
# Because this was run at work on a smaller sample of files....
all_kmers = list()
# with open("all_kmers_subset.txt", "r") as f:
# for line in f:
# all_kmers.append(int(line.strip()))
all_kmers = pickle.load(open("all_kmers.p", "rb"))
all_kmers = set(all_kmers)
len(all_kmers)
# len(data)
# all_kmers = set([item for sublist in data for item in sublist])
unused_kmers = set(range(0, space)) - all_kmers
kmer_dict = dict()
reverse_kmer_dict = dict();
a = 0
for i in all_kmers:
kmer_dict[i] = a
reverse_kmer_dict[a] = i
a += 1
kmer_count = len(all_kmers)
[len(all_kmers), len(unused_kmers), space]
# This fn now generates all possible combinations of training data....
def gen_training_data(input_data, window_size):
total_data = list()
for k in input_data.keys():
for kdata in input_data[k]:
for i in range(window_size + 1, len(kdata) - window_size):
kentry = list()
for x in range(i - window_size - 1, i + window_size):
kentry.append(kmer_dict[kdata[x]])
total_data.append([kentry, k])
return total_data
feature_columns = [tf.feature_column.numeric_column("x", shape=[15,128])]
embeddings = np.load("final_embeddings.npy")
def get_kmer_embeddings(kmers):
a = list() # numpy.empty(128 * 15)
for k in kmers:
a.append(embeddings[k])
return a
#return np.hstack(a)
def gen_training_data_generator(input_data, window_size, repdict):
for k in input_data.keys():
for kdata in input_data[k]:
for i in range(window_size + 1, len(kdata) - window_size):
kentry = list()
for x in range(i - window_size - 1, i + window_size):
kentry.append(kmer_dict[kdata[x]])
yield(kentry, [repdict[k]])
# Not infinite
def kmer_generator(directory, window_size):
files = [directory + "/" + f for f in os.listdir(directory)]
random.shuffle(files)
replicons_list = get_categories("training-files/")
repdict = dict()
a = 0
for i in replicons_list:
repdict[i] = a
a += 1
for f in files:
fasta = load_fasta(f)
yield from gen_training_data_generator(fasta, window_size, repdict)
# Plan to use tf.data.Dataset.from_generator
# ds = tf.contrib.data.Dataset.list_files("training-files/").map(tf_load_fasta)
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = itertools.cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = itertools.cycle(itertools.islice(nexts, pending))
def my_input_fn():
# kmer_gen = functools.partial(kmer_generator, "training-files/", window_size)
# kmer_gen1 = functools.partial(kmer_generator, "training-files/", window_size)
# kmer_gen2 = functools.partial(kmer_generator, "training-files/", window_size)
# kmer_gen3 = functools.partial(kmer_generator, "training-files/", window_size)
# Round robin 3 files...
# This will also cause everything to repeat 3 times
# Would be best to round robin all files at once and only repeat once (let tf.data.Dataset handle repeats)
rr = functools.partial(roundrobin, kmer_generator("training-files/", window_size),
kmer_generator("training-files/", window_size),
kmer_generator("training-files/", window_size),
kmer_generator("training-files/", window_size),
kmer_generator("training-files/", window_size))
# alternate_gens = functools.partial(alternate, kmer_gen, kmer_gen1, kmer_gen2, kmer_gen3)
ds = tf.data.Dataset.from_generator(rr,
(tf.int64,
tf.int64),
(tf.TensorShape([15]),
tf.TensorShape(None)))
# Numbers reduced to run on my desktop
# ds = ds.repeat(2)
# ds = ds.prefetch(8192)
# ds = ds.shuffle(buffer_size=1000000) # Large buffer size for better randomization
# ds = ds.batch(4096)
ds = ds.repeat(1)
ds = ds.prefetch(4096)
ds = ds.shuffle(buffer_size=10000)
ds = ds.batch(512)
def add_labels(arr, lab):
return({"x": arr}, lab)
ds = ds.map(add_labels)
iterator = ds.make_one_shot_iterator()
batch_features, batch_labels = iterator.get_next()
return batch_features, batch_labels
#next_batch = my_input_fn()
# with tf.Session() as sess:
# first_batch = sess.run(next_batch)
# print(first_batch)
# nn = tf.estimator.DNNClassifier(feature_columns=feature_columns,
# hidden_units = [256, 128, len(replicons_list) + 10],
# activation_fn=tf.nn.tanh,
# dropout=0.1,
# model_dir="classifier",
# n_classes=len(replicons_list),
# optimizer="Momentum")
# Have to know the names of the tensors to do this level of logging....
# Custom estimator would allow it....
# tensors_to_log = {"probabilities": "softmax_tensor"}
# logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=100)
# nn.train(input_fn = my_input_fn)
# Trained on l1 and l2 as 0.001 and learning_rate 0.1
# Changing learning rate to 0.2 for additional run
# dnn = tf.estimator.DNNClassifier(feature_columns=feature_columns,
# hidden_units = [256, 45],
# activation_fn=tf.nn.relu,
# dropout=0.05,
# model_dir="classifier.relu.dropout0.05.proximaladagrad.lrecoli",
# n_classes=len(replicons_list),
# optimizer=tf.train.ProximalAdagradOptimizer(
# learning_rate=0.2,
# l1_regularization_strength=0.001,
# l2_regularization_strength=0.001))
#acc = dnn.evaluate(input_fn = my_input_fn, steps=1000)
#print("Accuracy: " + acc["accuracy"] + "\n");
#print("Loss: %s" % acc["loss"])
#print("Root Mean Squared Error: %s" % acc["rmse"])
# dnn.train(input_fn = my_input_fn)
# CNN experiment for training
# Based off of kmer embeddings
embeddings_stored = np.load("final_embeddings.npy")
def _add_layer_summary(value, tag):
summary.scalar('%s/fraction_of_zero_values' % tag, tf.nn.zero_fraction(value))
summary.histogram('%s/activation' % tag, value)
# Based off of: https://www.tensorflow.org/tutorials/layers
def cnn_model_fn(features, labels, mode):
"""Model fn for CNN"""
# , [len(all_kmers), 128]
embeddings = tf.get_variable("embeddings", trainable=False, initializer=embeddings_stored)
embedded_kmers = tf.nn.embedding_lookup(embeddings, features["x"])
# shaped = tf.reshape(embedded_kmers, [-1, 1920])
# Input layer
# So inputs are 1920, or 15 * 128, and "1" deep (which is a float)
input_layer = tf.reshape(embedded_kmers, [-1, 15, 128, 1])
# filters * kernelsize[0] * kernel_size[1] must be > input_layer_size
# So 1920 <= 32 * 5 * 12
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[2,128],
strides=3,
padding="same",
name="Conv1",
activation=None) # With all 3 at tf.sigmoid getting ~ 80% accuracy and 0.5 loss
conv2 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[3, 128],
strides=3,
padding="same",
name="Conv2",
activation=None)
conv3 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 128],
strides=3,
padding="same",
name="Conv3",
activation=None)
# It took some work
# This conv4 layer now gives output of -1,3,3,32
# Which lets avg_pool4 and concat receive acceptable shapes...
# conv4 = tf.layers.conv2d(
# inputs=input_layer,
# filters=32,
# kernel_size=[3, 32],
# strides=6,
# padding="same",
# name="Conv4",
# activation=None)
# print(tf.shape(conv4))
avg_pool1 = tf.layers.average_pooling2d(conv1, pool_size=[4, 32], strides=[2,16], padding="same", name="AvgPooling_1")
avg_pool2 = tf.layers.average_pooling2d(conv2, pool_size=[4, 32], strides=[2,16], padding="same", name="AvgPooling_2")
avg_pool3 = tf.layers.average_pooling2d(conv3, pool_size=[4, 32], strides=[2,16], padding="same", name="AvgPooling_3")
# avg_pool4 = tf.layers.average_pooling2d(conv4, pool_size=[1, 16], strides=[1, 8], padding="same", name="AvgPooling_4")
# print(tf.shape(avg_pool4))
# pool1 = tf.layers.max_pooling1d(conv1, pool_size=4, strides=2, padding="same", name="Pool1")
# pool2 = tf.layers.max_pooling1d(conv2, pool_size=4, strides=2, padding="same", name="Pool2")
# pool3 = tf.layers.max_pooling1d(conv3, pool_size=4, strides=2, padding="same", name="Pool3")
# pool4 = tf.layers.max_pooling1d(conv4, pool_size=4, strides=2, padding="same", name="Pool4")
# all_pools = tf.concat([pool1, pool2, pool3, pool4], 2)
#all_pools = tf.concat([conv1, conv2, conv3, conv4], 1)
all_pools = tf.concat([avg_pool1, avg_pool2, avg_pool3], 1) # avg_pool4 is removed
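    # Shape check (assuming 'same' padding): each conv (stride 3) maps the
    # [15, 128, 1] input to [5, 43, 32]; each average pool ([4, 32] window,
    # [2, 16] stride) reduces that to [3, 3, 32]. Concatenating the three
    # branches along axis 1 gives [9, 3, 32] = 864 values per sample, which
    # matches the 3 * 3 * 32 * 3 used in the flatten below.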
flatten = tf.reshape(all_pools, [-1, 3 * 3 * 32 * 3])
#conv1_img = tf.unstack(conv1, axis=3)
#tf.summary.image("Visualize_conv1", conv1_img)
# Our input to pool1 is 5, 128, 32 now....
# pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding="same")
# So output is....
# -1 8 x 64 x 32
# Convolutional Layer #2 and Pooling Layer #2
# conv2 = tf.layers.conv2d(
# inputs=conv1,
# filters=64,
# kernel_size=[2, 2],
# strides=[1,4],
# padding="same",
# activation=tf.nn.relu)
# conv2_img = tf.expand_dims(tf.unstack(conv2, axis=3), axis=3)
# tf.summary.image("Visualize_conv2", conv2_img)
# SO output here is 4 x 60 x 64
# flatten = tf.reshape(conv1, [-1, 15 * 16, 32], name = "Flatten")
# So now should be -1, 8, 64, 64
# pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
#tf.layers.max_pooling1d(inputs=flatten, pool_size=)
# avg_pooled = tf.layers.average_pooling1d(flatten, pool_size=10, strides=5, padding="same", name="AvgPooling")
# pool2 reduces by half again
# So -1, 4, 32, 64
# pool2_flat = tf.reshape(avg_pool1, [-1, 2048]) # 4 * 32 * 64 = 8192
# 2,048 neurons
dropout1 = tf.layers.dropout(inputs=flatten, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
dense = tf.layers.dense(inputs=dropout1, units=2048, activation=None)
dropout2 = tf.layers.dropout(inputs=dense, rate=0.2, training=mode == tf.estimator.ModeKeys.TRAIN)
dense2 = tf.layers.dense(inputs=dropout2, units=1024, activation=tf.nn.relu)
    dropout3 = tf.layers.dropout(inputs=dense2, rate=0.2, training=mode == tf.estimator.ModeKeys.TRAIN)
dense3 = tf.layers.dense(inputs=dropout3, units=512, activation=tf.nn.relu)
_add_layer_summary(dense, "Dense")
_add_layer_summary(dense2, "Dense2")
_add_layer_summary(dense3, "Dense3")
# 0.5 is suggested for CNNs
# 0.2 makes the system memorize the data
# Trying 0.4, may switch to 0.3 or 0.5 again.
dropout = tf.layers.dropout(
inputs=dense3, rate=0.2, training=mode == tf.estimator.ModeKeys.TRAIN)
# Must have len(replicons_list) neurons
logits = tf.layers.dense(inputs=dropout, units=len(replicons_list))
_add_layer_summary(logits, "Logits")
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=len(replicons_list))
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # tf.summary.text("LogitsArgMax", tf.as_string(tf.argmax(logits, 1)))
#tf.summary.text("Labels", tf.as_string(labels))
#tf.summary.text("Prediction", tf.as_string(tf.argmax(labels, 1)))
# tf.summary.text("Onehot", tf.as_string(onehot_labels))
# tf.summary.text("Predictions", tf.as_string(correct_prediction))
tf.summary.scalar('Accuracy', accuracy)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops=eval_metric_ops)
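# Hypothetical inference sketch (not part of the original script; names such as
# `trained_classifier` and `kmer_index_window` are assumptions):
#   pred_input_fn = tf.estimator.inputs.numpy_input_fn(
#       {"x": np.array([kmer_index_window], dtype=np.int64)}, shuffle=False)
#   predictions = list(trained_classifier.predict(input_fn=pred_input_fn))
# where kmer_index_window is a length-15 list of kmer_dict indices, matching the
# [15] feature shape consumed by cnn_model_fn above.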
def main(unused_argv):
classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn,
model_dir="classifier_cnn4",
config=tf.contrib.learn.RunConfig(
save_checkpoints_steps=1000,
save_checkpoints_secs=None,
save_summary_steps=100))
# tensors_to_log = {"probabilities": "softmax_tensor"}
# logging_hook = tf.train.LoggingTensorHook(
# tensors=tensors_to_log, every_n_iter=50)
classifier.train(input_fn=my_input_fn)
# steps=10000
#hooks=[logging_hook])
eval_results = classifier.evaluate(input_fn=my_input_fn, steps=1000)
print(eval_results)
if __name__ == "__main__":
tf.app.run()
| epl-1.0 |
smblance/ggplot | ggplot/geoms/geom_boxplot.py | 12 | 1218 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_boxplot(geom):
DEFAULT_AES = {'y': None, 'color': 'black', 'flier_marker': '+'}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
def __group(self, x, y):
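        """Group the x values by their y category, e.g. ([1, 2, 3], ['a', 'a', 'b']) -> {'a': [1, 2], 'b': [3]}."""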
out = {}
for xx, yy in zip(x,y):
if yy not in out: out[yy] = []
out[yy].append(xx)
return out
def _plot_unit(self, pinfo, ax):
x = pinfo.pop('x')
y = pinfo.pop('y')
color = pinfo.pop('color')
fliermarker = pinfo.pop('flier_marker')
if y is not None:
g = self.__group(x,y)
l = sorted(g.keys())
x = [g[k] for k in l]
q = ax.boxplot(x, vert=False)
plt.setp(q['boxes'], color=color)
plt.setp(q['whiskers'], color=color)
plt.setp(q['fliers'], color=color, marker=fliermarker)
if l:
plt.setp(ax, yticklabels=l)
| bsd-2-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/base.py | 4 | 38618 | """
Base and utility classes for pandas objects.
"""
import warnings
from pandas import compat
from pandas.compat import builtins
import numpy as np
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.core.dtypes.common import is_object_dtype, is_list_like, is_scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core import common as com
import pandas.core.nanops as nanops
import pandas._libs.lib as lib
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.common import AbstractMethodError
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _dir_additions(self):
""" add addtional __dir__ for this object """
return set()
def _dir_deletions(self):
""" delete unwanted __dir__ for this object """
return set()
def __dir__(self):
"""
Provide method name lookup and completion
Only provide 'public' methods
"""
rv = set(dir(type(self)))
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for a object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
    call to `self._freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# dict lookup instead of getattr as getattr is false for getter
# which error
if getattr(self, "__frozen", False) and not \
(key in type(self).__dict__ or key == "_cache"):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
class PandasDelegate(PandasObject):
""" an abstract base class for delegating methods/properties """
def _delegate_property_get(self, name, *args, **kwargs):
raise TypeError("You cannot access the "
"property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
def _delegate_method(self, name, *args, **kwargs):
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
def _add_delegate_accessors(cls, delegate, accessors, typ,
overwrite=False):
"""
add accessors to cls from the delegate class
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
        accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(fget=_getter, fset=_setter,
doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate, name).__doc__
return f
for name in accessors:
if typ == 'property':
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls, name, f)
class AccessorProperty(object):
"""Descriptor for implementing accessor properties like Series.str
"""
def __init__(self, accessor_cls, construct_accessor):
self.accessor_cls = accessor_cls
self.construct_accessor = construct_accessor
self.__doc__ = accessor_cls.__doc__
def __get__(self, instance, owner=None):
if instance is None:
# this ensures that Series.str.<method> is well defined
return self.accessor_cls
return self.construct_accessor(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.max,
builtins.min: np.min
}
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
builtins.min: 'min',
np.sum: 'sum',
np.mean: 'mean',
np.prod: 'prod',
np.std: 'std',
np.var: 'var',
np.median: 'median',
np.max: 'max',
np.min: 'min',
np.cumprod: 'cumprod',
np.cumsum: 'cumsum'
}
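    # _builtin_table swaps Python builtins for their NumPy equivalents, while
    # _cython_table maps common callables to the names of the optimized
    # aggregation methods dispatched through _is_cython_func.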
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
raise SpecificationError('cannot perform renaming '
'for {0} with a nested '
'dictionary'.format(k))
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any([isinstance(r, ABCSeries)
for r in compat.itervalues(result)])
def is_any_frame():
# return a boolean if we have *any* nested series
return any([isinstance(r, ABCDataFrame)
for r in compat.itervalues(result)])
if isinstance(result, list):
return concat(result, keys=keys, axis=1), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com._get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for col in obj:
try:
colg = self._gotitem(col, ndim=1, subset=obj[col])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
class GroupByMixin(object):
""" provide the groupby facilities to the mixed object """
@staticmethod
def _dispatch(name, *args, **kwargs):
""" dispatch to apply """
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = dict([(attr, getattr(self, attr))
for attr in self._attributes])
self = self.__class__(subset,
groupby=self._groupby[key],
parent=self,
**kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
class IndexOpsMixin(object):
""" common ops mixin to support a unified inteface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="return the transpose, which is by "
"definition self")
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
""" return the first element of the underlying data as a python
scalar
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
return self._values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
return self._values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
return self.values.base
@property
def _values(self):
""" the internal implementation """
return self.values
@property
def empty(self):
return not self.size
def max(self):
""" The maximum value of the object """
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
return a ndarray of the maximum argument indexer
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
""" The minimum value of the object """
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
return a ndarray of the minimum argument indexer
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return isnull(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Returns object containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
"""
from pandas.core.algorithms import value_counts
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
return result
_shared_docs['unique'] = (
"""
Return unique values in the object. Uniques are returned in order
of appearance, this does NOT sort. Hash table-based unique.
Parameters
----------
values : 1d array-like
Returns
-------
unique values.
- If the input is an Index, the return is an Index
- If the input is a Categorical dtype, the return is a Categorical
- If the input is a Series/ndarray, the return will be an ndarray
See Also
--------
unique
Index.unique
Series.unique
""")
@Appender(_shared_docs['unique'] % _indexops_doc_kwargs)
def unique(self):
values = self._values
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.algorithms import unique1d
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isnull(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
"""
return self.nunique() == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and is_object_dtype(self):
v += lib.memory_usage_of_objects(self.values)
return v
def factorize(self, sort=False, na_sentinel=-1):
"""
Encode the object as an enumerated type or categorical variable
Parameters
----------
sort : boolean, default False
Sort by values
na_sentinel: int, default -1
Value to mark "not found"
Returns
-------
labels : the indexer to the original array
uniques : the unique Index
"""
from pandas.core.algorithms import factorize
return factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs['searchsorted'] = (
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
    >>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk' ])
    >>> x
    [apple, bread, bread, cheese, milk]
    Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'])
array([1])
>>> x.searchsorted(['bread', 'eggs'])
array([1, 4])
>>> x.searchsorted(['bread', 'eggs'], side='right')
array([3, 4]) # eggs before milk
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
_shared_docs['drop_duplicates'] = (
"""Return %(klass)s with duplicate values removed
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
%(inplace)s
Returns
-------
deduplicated : %(klass)s
""")
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
_shared_docs['duplicated'] = (
"""Return boolean %(duplicated)s denoting duplicate values
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : %(duplicated)s
""")
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(duplicated(self, keep=keep),
index=self.index).__finalize__(self)
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
| mit |
wangwei7175878/tutorials | tensorflowTUT/tf11_build_network/full_code.py | 2 | 2107 | # View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
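    # Weights has shape [in_size, out_size]; biases starts slightly above zero
    # (0.1) and broadcasts across the batch in Wx_plus_b below.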
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
Wx_plus_b = tf.matmul(inputs, Weights) + biases
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
# Make up some real data
x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)
y_data = np.square(x_data) - 0.5 + noise
##plt.scatter(x_data, y_data)
##plt.show()
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None)
# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=[1]))
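# reduce_sum over axis 1 collapses each sample's squared error and reduce_mean
# averages over the batch, i.e. this is a mean squared error loss.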
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# important step
sess = tf.Session()
# tf.initialize_all_variables() no long valid from
# 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess.run(init)
for i in range(1000):
# training
sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
if i % 50 == 0:
# to see the step improvement
print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
| mit |
carlgogo/vip_exoplanets | vip_hci/stats/im_stats.py | 2 | 3777 | #! /usr/bin/env python
"""
Module for image statistics.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['frame_histo_stats',
'frame_average_radprofile']
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from ..var import frame_center
from ..conf.utils_conf import check_array, vip_figsize
def frame_average_radprofile(frame, sep=1, init_rad=None, plot=True):
""" Calculates the average radial profile of an image.
Parameters
----------
frame : numpy ndarray
Input image or 2d array.
sep : int, optional
The average radial profile is recorded every ``sep`` pixels.
plot : bool, optional
If True the profile is plotted.
Returns
-------
df : dataframe
Pandas dataframe with the radial profile and distances.
Notes
-----
https://stackoverflow.com/questions/21242011/most-efficient-way-to-calculate-radial-profile
https://stackoverflow.com/questions/48842320/what-is-the-best-way-to-calculate-radial-average-of-the-image-with-python
https://github.com/keflavich/image_tools/blob/master/image_tools/radialprofile.py
"""
check_array(frame, dim=2)
cy, cx = frame_center(frame)
if init_rad is None:
init_rad = 1
x, y = np.indices((frame.shape))
r = np.sqrt((x - cx) ** 2 + (y - cy) ** 2)
r = r.astype(int)
tbin = np.bincount(r.ravel(), frame.ravel())
nr = np.bincount(r.ravel())
radprofile = tbin / nr
radists = np.arange(init_rad + 1, int(cy), sep) - 1
radprofile_radists = radprofile[radists]
nr_radists = nr[radists]
df = pd.DataFrame({'rad': radists, 'radprof': radprofile_radists,
'npx': nr_radists})
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(radists, radprofile_radists, '.-', alpha=0.6)
plt.grid(which='both', alpha=0.4)
plt.xlabel('Pixels')
plt.ylabel('Counts')
plt.minorticks_on()
plt.xlim(0)
return df
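# Usage sketch (doctest-style, synthetic data): a constant frame should yield
# a flat average radial profile.
#
#     >>> frame = np.ones((101, 101))
#     >>> df = frame_average_radprofile(frame, sep=2, plot=False)
#     >>> bool(np.allclose(df['radprof'], 1.0))
#     True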
def frame_histo_stats(image_array, plot=True):
"""Plots a frame with a colorbar, its histogram and some statistics: mean,
median, maximum, minimum and standard deviation values.
Parameters
----------
image_array : numpy ndarray
The input frame.
plot : bool, optional
If True plots the frame and the histogram with the values.
Return
------
mean : float
Mean value of array.
median : float
Median value of array.
std : float
Standard deviation of array.
maxim : int or float
Maximum value.
minim : int or float
Minimum value.
"""
vector = image_array.flatten()
mean = vector.mean()
median = np.median(vector)
maxim = vector.max()
minim = vector.min()
std = vector.std()
if plot:
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
ax0, ax1 = axes.flat
bins = int(np.sqrt(vector.shape[0]))
txt = 'Mean = {:.3f}\n'.format(mean) + \
'Median = {:.3f}\n'.format(median) +\
'Stddev = {:.3f}\n'.format(std) +\
'Max = {:.3f}\n'.format(maxim) +\
'Min = {:.3f}\n\n'.format(minim)
ax0.imshow(image_array, interpolation="nearest", origin ="lower",
cmap='viridis')
ax0.set_title('Frame')
ax0.grid('off')
ax1.hist(vector, bins=bins, label=txt, alpha=0.5, histtype='stepfilled')
ax1.set_yscale('log')
ax1.set_title('Histogram')
ax1.text(0.98, 0.98, txt, transform=ax1.transAxes, fontsize=12,
verticalalignment='top', horizontalalignment='right')
plt.show()
return mean, median, std, maxim, minim
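# Usage sketch (doctest-style, synthetic data): the returned tuple is
# (mean, median, std, max, min) of the flattened frame.
#
#     >>> stats = frame_histo_stats(np.arange(9.).reshape(3, 3), plot=False)
#     >>> [round(float(v), 2) for v in stats]
#     [4.0, 4.0, 2.58, 8.0, 0.0]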
| bsd-3-clause |
giorgiop/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 58 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
arcyfelix/Courses | 17-06-05-Machine-Learning-For-Trading/43_painless_qlearning.py | 2 | 4944 | # Author: Kyle Kastner
# License: BSD 3-Clause
# Implementing http://mnemstudio.org/path-finding-q-learning-tutorial.htm
# Q-learning formula from http://sarvagyavaish.github.io/FlappyBirdRL/
# Visualization based on code from Gael Varoquaux gael.varoquaux@normalesup.org
# http://scikit-learn.org/stable/auto_examples/applications/plot_stock_market.html
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
# defines the reward/connection graph
r = np.array([[-1, -1, -1, -1, 0, -1],
[-1, -1, -1, 0, -1, 100],
[-1, -1, -1, 0, -1, -1],
[-1, 0, 0, -1, 0, -1],
[ 0, -1, -1, 0, -1, 100],
[-1, 0, -1, -1, 0, 100]]).astype("float32")
q = np.zeros_like(r)
def update_q(state, next_state, action, alpha, gamma):
rsa = r[state, action]
qsa = q[state, action]
new_q = qsa + alpha * (rsa + gamma * max(q[next_state, :]) - qsa)
q[state, action] = new_q
# renormalize row to be between 0 and 1
rn = q[state][q[state] > 0] / np.sum(q[state][q[state] > 0])
q[state][q[state] > 0] = rn
return r[state, action]
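# Worked example of the update above (hypothetical numbers): with alpha=1.0,
# gamma=0.8, r[state, action]=100 and max(q[next_state, :])=0, the table entry
# becomes qsa + 1.0 * (100 + 0.8 * 0 - qsa) = 100, after which the row is
# renormalized so that its positive entries sum to 1.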
def show_traverse():
# show all the greedy traversals
for i in range(len(q)):
current_state = i
traverse = "%i -> " % current_state
n_steps = 0
while current_state != 5 and n_steps < 20:
next_state = np.argmax(q[current_state])
current_state = next_state
traverse += "%i -> " % current_state
n_steps = n_steps + 1
# cut off final arrow
traverse = traverse[:-4]
print("Greedy traversal for starting state %i" % i)
print(traverse)
print("")
def show_q():
# show all the valid/used transitions
coords = np.array([[2, 2],
[4, 2],
[5, 3],
[4, 4],
[2, 4],
[5, 2]])
# invert y axis for display
coords[:, 1] = max(coords[:, 1]) - coords[:, 1]
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
plt.scatter(coords[:, 0], coords[:, 1], c='r')
start_idx, end_idx = np.where(q > 0)
segments = [[coords[start], coords[stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.array(q[q > 0])
# bump up values for viz
values = values
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r)
lc.set_array(values)
ax.add_collection(lc)
verticalalignment = 'top'
horizontalalignment = 'left'
for i in range(len(coords)):
x = coords[i][0]
y = coords[i][1]
name = str(i)
if i == 1:
y = y - .05
x = x + .05
elif i == 3:
y = y - .05
x = x + .05
elif i == 4:
y = y - .05
x = x + .05
else:
y = y + .05
x = x + .05
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(float(len(coords))),
alpha=.6))
plt.show()
# Core algorithm
gamma = 0.8
alpha = 1.
n_episodes = 1E3
n_states = 6
n_actions = 6
epsilon = 0.05
random_state = np.random.RandomState(1999)
for e in range(int(n_episodes)):
states = list(range(n_states))
random_state.shuffle(states)
current_state = states[0]
goal = False
if e % int(n_episodes / 10.) == 0 and e > 0:
pass
# uncomment this to see plots each monitoring
#show_traverse()
#show_q()
while not goal:
# epsilon greedy
valid_moves = r[current_state] >= 0
if random_state.rand() < epsilon:
actions = np.array(list(range(n_actions)))
actions = actions[valid_moves == True]
if type(actions) is int:
actions = [actions]
random_state.shuffle(actions)
action = actions[0]
next_state = action
else:
if np.sum(q[current_state]) > 0:
action = np.argmax(q[current_state])
else:
# Don't allow invalid moves at the start
# Just take a random move
actions = np.array(list(range(n_actions)))
actions = actions[valid_moves == True]
random_state.shuffle(actions)
action = actions[0]
next_state = action
reward = update_q(current_state, next_state, action,
alpha=alpha, gamma=gamma)
# Goal state has reward 100
if reward > 1:
goal = True
current_state = next_state
print(q)
show_traverse()
show_q() | apache-2.0 |
dungvtdev/upsbayescpm | pgmpy/estimators/BayesianEstimator.py | 4 | 7891 | # -*- coding: utf-8 -*-
import numpy as np
from pgmpy.estimators import ParameterEstimator
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
class BayesianEstimator(ParameterEstimator):
def __init__(self, model, data, **kwargs):
"""
Class used to compute parameters for a model using Bayesian Parameter Estimation.
See `MaximumLikelihoodEstimator` for constructor parameters.
"""
if not isinstance(model, BayesianModel):
raise NotImplementedError("Bayesian Parameter Estimation is only implemented for BayesianModel")
super(BayesianEstimator, self).__init__(model, data, **kwargs)
def get_parameters(self, prior_type='BDeu', equivalent_sample_size=5, pseudo_counts=None):
"""
Method to estimate the model parameters (CPDs).
Parameters
----------
prior_type: 'dirichlet', 'BDeu', or 'K2'
            string indicating which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; a dict containing, for each variable, a list
with a "virtual" count for each variable state, that is added to the state counts.
(lexicographic ordering of states assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))` for each node.
'equivalent_sample_size' can either be a numerical value or a dict that specifies
            the size for each variable separately.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
parameters: list
List of TabularCPDs, one for each variable of the model
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)),
... columns=['A', 'B', 'C', 'D'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D')])
>>> estimator = BayesianEstimator(model, values)
>>> estimator.get_parameters(prior_type='BDeu', equivalent_sample_size=5)
[<TabularCPD representing P(C:2) at 0x7f7b534251d0>,
<TabularCPD representing P(B:2 | C:2, A:2) at 0x7f7b4dfd4da0>,
<TabularCPD representing P(A:2) at 0x7f7b4dfd4fd0>,
<TabularCPD representing P(D:2 | C:2) at 0x7f7b4df822b0>]
"""
parameters = []
for node in self.model.nodes():
_equivalent_sample_size = equivalent_sample_size[node] if isinstance(equivalent_sample_size, dict) else \
equivalent_sample_size
_pseudo_counts = pseudo_counts[node] if isinstance(pseudo_counts, dict) else pseudo_counts
cpd = self.estimate_cpd(node,
prior_type=prior_type,
equivalent_sample_size=_equivalent_sample_size,
pseudo_counts=_pseudo_counts)
parameters.append(cpd)
return parameters
def estimate_cpd(self, node, prior_type='BDeu', pseudo_counts=[], equivalent_sample_size=5):
"""
Method to estimate the CPD for a given variable.
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
prior_type: 'dirichlet', 'BDeu', 'K2',
            string indicating which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; a list or dict
with a "virtual" count for each variable state.
The virtual counts are added to the actual state counts found in the data.
(if a list is provided, a lexicographic ordering of states is assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))`.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> estimator = BayesianEstimator(model, data)
>>> cpd_C = estimator.estimate_cpd('C', prior_type="dirichlet", pseudo_counts=[1, 2])
>>> print(cpd_C)
╒══════╤══════╤══════╤══════╤════════════════════╕
│ A │ A(0) │ A(0) │ A(1) │ A(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ B │ B(0) │ B(1) │ B(0) │ B(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(0) │ 0.25 │ 0.25 │ 0.5 │ 0.3333333333333333 │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(1) │ 0.75 │ 0.75 │ 0.5 │ 0.6666666666666666 │
╘══════╧══════╧══════╧══════╧════════════════════╛
"""
node_cardinality = len(self.state_names[node])
parents = sorted(self.model.get_parents(node))
parents_cardinalities = [len(self.state_names[parent]) for parent in parents]
if prior_type == 'K2':
pseudo_counts = [1] * node_cardinality
elif prior_type == 'BDeu':
alpha = float(equivalent_sample_size) / (node_cardinality * np.prod(parents_cardinalities))
pseudo_counts = [alpha] * node_cardinality
elif prior_type == 'dirichlet':
if not len(pseudo_counts) == node_cardinality:
raise ValueError("'pseudo_counts' should have length {0}".format(node_cardinality))
if isinstance(pseudo_counts, dict):
pseudo_counts = sorted(pseudo_counts.values())
else:
raise ValueError("'prior_type' not specified")
state_counts = self.state_counts(node)
bayesian_counts = (state_counts.T + pseudo_counts).T
cpd = TabularCPD(node, node_cardinality, np.array(bayesian_counts),
evidence=parents,
evidence_card=parents_cardinalities,
state_names=self.state_names)
cpd.normalize()
return cpd
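# Worked example of the BDeu pseudo-count arithmetic above (hypothetical
# numbers): for a binary node with a single binary parent and
# equivalent_sample_size=5, alpha = 5 / (2 * 2) = 1.25, so 1.25 virtual
# observations are added to every (node state, parent state) cell of the
# state counts before the CPD is normalized.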
| mit |
MechCoder/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 79 | 2849 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float64)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
antiface/mne-python | mne/time_frequency/tfr.py | 2 | 48373 | """A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
#
# License : BSD (3-clause)
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..utils import check_fname
from .multitaper import dpss_windows
from .._hdf5 import write_hdf5, read_hdf5
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
sigma : float, (optional)
        It controls the width of the wavelet, i.e. its temporal
resolution. If sigma is None the temporal resolution
is adapted with the frequency like for all wavelet transform.
The higher the frequency the shorter is the wavelet.
If sigma is fixed the temporal resolution is fixed
like for the short time Fourier transform and the number
of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_enveloppe
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
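# Usage sketch (doctest-style, hypothetical values): one wavelet is returned
# per frequency, and lower frequencies get longer wavelets when sigma is None.
#
#     >>> Ws = morlet(1000., np.array([10., 20.]), n_cycles=7)
#     >>> len(Ws)
#     2
#     >>> len(Ws[0]) > len(Ws[1])
#     True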
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
sfreq : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
        Compute convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
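# Usage sketch (doctest-style, synthetic data; shapes only):
#
#     >>> X = np.random.RandomState(0).randn(2, 2000)  # 2 signals, 2 s at 1 kHz
#     >>> tfr = cwt_morlet(X, sfreq=1000., freqs=np.array([10., 20., 30.]))
#     >>> tfr.shape  # (n_signals, n_freqs, n_times), complex-valued
#     (2, 3, 2000)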
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_times = n_times // decim + bool(n_times % decim)
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr = tfr[:, ::decim]
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
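# Usage sketch (doctest-style, synthetic data; shapes only):
#
#     >>> data = np.random.RandomState(0).randn(5, 2, 2000)  # epochs x ch x time
#     >>> power = single_trial_power(data, sfreq=1000.,
#     ...                            frequencies=np.array([10., 20.]), decim=4)
#     >>> power.shape  # (n_epochs, n_channels, n_freqs, n_times // decim)
#     (5, 2, 2, 500)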
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
power : 2D array
Induced power (Channels x Frequencies x Timepoints).
Squared amplitude of time-frequency coefficients.
phase_lock : 2D array
Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
self.comment = comment
self.method = method
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.data = inst.data[..., mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
data = self.data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) and len(axes) != len(picks):
raise RuntimeError('There must be an axes for each picked '
'channel.')
if colorbar:
logger.warning('Cannot draw colorbar for user defined axes.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, ylim=None,
tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=False, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
if show:
plt.show()
return fig
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo
import matplotlib.pyplot as plt
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
if picks is not None:
data = data[picks]
info = pick_info(info, picks)
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
fig = _plot_topo(info=info, times=times,
show_func=imshow, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (ms)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor,
font_color=font_color)
if show:
plt.show()
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap='RdBu_r',
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | dict | None
The outlines to be drawn. If 'head', a head scheme will be drawn.
If dict, each key refers to a tuple of x and y positions.
The values in 'mask_pos' will serve as image mask. If None, nothing
will be drawn. Defaults to 'head'. If dict, the 'autoshrink' (bool)
field will trigger automated shrinking of the positions due to
points outside the outline. Moreover, a matplotlib patch object can
be passed for advanced masking options, either directly or as a
function that returns patches (required for multi-axis plots).
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
The file name, which should end with -tfr.h5 .
overwrite : bool
If True, overwrite file (if it exists). Defaults to false
"""
write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info, nave=tfr.nave,
comment=tfr.comment, method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite)
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname)
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
.format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
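# Usage sketch (hypothetical file name and ``power`` object): the file name
# must end in -tfr.h5, and reading without ``condition`` returns a list.
#
#     >>> power.save('sample-tfr.h5', overwrite=True)  # doctest: +SKIP
#     >>> tfrs = read_tfrs('sample-tfr.h5')            # doctest: +SKIP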
def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
        Whether to use FFT-based convolution.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
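# Usage sketch (hypothetical ``epochs`` instance and frequency grid):
#
#     >>> freqs = np.arange(6., 30., 3.)
#     >>> power, itc = tfr_morlet(epochs, freqs, n_cycles=3.,
#     ...                         return_itc=True, decim=3)  # doctest: +SKIP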
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
sfreq : float
sampling Frequency
frequencies : np.ndarray, shape (n_frequencies,)
Array of frequencies of interest
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
decim: int
Temporal decimation factor. Defaults to 1.
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if n_times <= n_times_wavelets:
warnings.warn("Time windows are as long or longer than the epoch. "
"Consider reducing n_cycles.")
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :],
Ws[m], use_fft, decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool
        Whether to use FFT-based convolution.
Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int
The decimation factor on the time axis. To reduce memory usage.
        Note that this is brute force decimation, no anti-aliasing is done.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
return out
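# Worked example of the resolution trade-off documented above (hypothetical
# numbers): freq = 20 Hz and n_cycles = 10 give a window of T = 10 / 20 = 0.5 s;
# with time_bandwidth = 4.0 the frequency smoothing is 4.0 / 0.5 = 8 Hz and
# floor(4.0 - 1) = 3 tapers are used.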
| bsd-3-clause |
trichter/sito | bin/console/plotday.py | 1 | 3486 | #!/usr/bin/env python
# by TR
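# Example invocations (hypothetical station names, dates and file names; the
# available flags are defined by the argparse options below):
#     python plotday.py PB01 2010-01-01 -c HHZ -a 0.02
#     python plotday.py myfile.QHD -r 2.0 -s out.png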
import argparse
from obspy.core import UTCDateTime as UTC
import logging
logging.basicConfig()
parser = argparse.ArgumentParser(description='Day plots.')
parser.add_argument('file_station',
help='file to plot or station to plot')
parser.add_argument('date', nargs='?', default=None, type=UTC,
help='if first argument is station: date')
parser.add_argument('-a', '--absolute-scale', type=float, # default=0.02,
help='display with different scale')
parser.add_argument('-r', '--relative-scale', type=float,
help='display with different relative scale - '
'overwrites ABSOLUTE_SCALE')
parser.add_argument('-s', '--save',
help='save plot to this file instead of showing')
parser.add_argument('-x', '--xcorr-append',
help='dont plot raw data and pass this argument to Data object')
parser.add_argument('-c', '--channel', default='HHZ',
help='channel to plot, default: HHZ')
parser.add_argument('--component', default=None,
help='component to plot, depr., use channel')
parser.add_argument('-d', '--downsample', default=1,
help='downsample to this sampling rate, default: 1')
parser.add_argument('-o', '--options',
help='dictionary with kwargs passed to plotday')
parser.add_argument('-f', '--frequency', nargs='?', default=False, const=None,
help='plot frequency spectrum')
args = parser.parse_args()
if args.options is None:
kwargs = {}
else:
kwargs = eval('dict(%s)' % args.options)
kwargs.update(dict(absolutescale=args.absolute_scale,
scale=args.relative_scale,
downsample=args.downsample,
save=args.save, show=args.save is None))
print kwargs
if args.date is None:
from sito import read
    if '.' not in args.file_station:
args.file_station = args.file_station + '.QHD'
stream = read(args.file_station)
if args.absolute_scale is None and args.relative_scale is None:
kwargs['scale'] = 1.
else:
station = args.file_station
if station.startswith('PB') or station == 'LVC' or station.endswith('CX'):
from sito.data import IPOC
data = IPOC(xcorr_append=args.xcorr_append)
elif station == 'PKD':
from sito.data import Parkfield
data = Parkfield(xcorr_append=args.xcorr_append)
else:
        raise ValueError('Not a valid station name')
day = UTC(args.date)
if args.xcorr_append is None:
#stream = data.getRawStream(day, station, component=args.component)
stream = data.getRawStreamFromClient(day, day + 24 * 3600, station, component=args.component, channel=args.channel)
else:
stream = data.getStream(day, station, component=args.component)
if args.absolute_scale is None and args.relative_scale is None:
kwargs['absolutescale'] = 0.0005
tr = stream[0]
tr.stats.filter = ''
if args.frequency is False:
if tr.stats.is_fft:
tr.ifft()
print tr
#tr.plotTrace(component=args.component, **kwargs)
tr.plotTrace(**kwargs)
else:
# if not tr.stats.is_fft:
# tr.fft(1024)
# if tr.stats.is_fft:
# if args.frequency is not None:
# tr.ffttrim(*eval(args.frequency))
print tr
tr.plotPSD()
if args.save is None:
from matplotlib.pyplot import show
show()
| mit |
glennq/scikit-learn | examples/svm/plot_iris.py | 15 | 3256 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
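# Note: given scikit-learn's documented defaults, the LinearSVC above is
# assumed to be equivalent to spelling out the choices discussed in the
# docstring, i.e. the squared hinge loss and the one-vs-rest reduction:
#     svm.LinearSVC(C=C, loss='squared_hinge', multi_class='ovr')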
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
saifrahmed/bokeh | bokeh/charts/builder/scatter_builder.py | 43 | 7792 | """This is the Bokeh charts interface. It gives you a high level API
to build complex plots in a simple way.
This is the Scatter class which lets you build your Scatter charts
by just passing the arguments to the Chart class and calling the proper
functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
try:
import pandas as pd
except:
pd = None
from collections import OrderedDict
from ..utils import chunk, cycle_colors, make_scatter
from .._builder import create_and_build, Builder
from .._data_adapter import DataAdapter
from ...models import ColumnDataSource, Range1d
from ...properties import String
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Scatter(values, **kws):
""" Create a scatter chart using :class:`ScatterBuilder <bokeh.charts.builder.scatter_builder.ScatterBuilder>`
to render the geometry from values.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Scatter, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames of (x, y) tuples are valid inputs)
xyvalues = OrderedDict()
xyvalues['python'] = [(1, 2), (3, 3), (4, 7), (5, 5), (8, 26)]
xyvalues['pypy'] = [(1, 12), (2, 23), (4, 47), (5, 15), (8, 46)]
xyvalues['jython'] = [(1, 22), (2, 43), (4, 10), (6, 25), (8, 26)]
scatter = Scatter(xyvalues, title="Scatter", legend="top_left", ylabel='Languages')
output_file('scatter.html')
show(scatter)
"""
return create_and_build(ScatterBuilder, values, **kws)
class ScatterBuilder(Builder):
"""This is the Scatter class and it is in charge of plotting
Scatter charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges. And finally add
the needed glyphs (markers) taking the references from the source.
"""
# TODO: (bev) should be an enumeration
marker = String("circle", help="""
The marker type to use (default: ``circle``).
""")
def _process_data(self):
"""Take the scatter.values data to calculate the chart properties
accordingly. Then build a dict containing references to all the
calculated points to be used by the marker glyph inside the
``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
# list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# Grouping
self.parse_data()
@property
def parse_data(self):
"""Parse data received from self._values and create correct x, y
series values checking if input is a pandas DataFrameGroupBy
        object or one of the standard supported types (that can be
converted to a DataAdapter)
"""
if pd is not None and \
isinstance(self._values, pd.core.groupby.DataFrameGroupBy):
return self._parse_groupped_data
else:
return self._parse_data
def _parse_groupped_data(self):
"""Parse data in self._values in case it's a pandas
DataFrameGroupBy and create the data 'x_...' and 'y_...' values
for all data series
"""
for i, val in enumerate(self._values.keys()):
xy = self._values[val]
self._set_and_get("x_", val, xy[:, 0])
self._set_and_get("y_", val, xy[:, 1])
def _parse_data(self):
"""Parse data in self._values in case it's an iterable (not a pandas
DataFrameGroupBy) and create the data 'x_...' and 'y_...' values
for all data series
"""
for i, val in enumerate(self._values.keys()):
x_, y_ = [], []
xy = self._values[val]
for value in self._values.index:
x_.append(xy[value][0])
y_.append(xy[value][1])
self.set_and_get("x_", val, x_)
self.set_and_get("y_", val, y_)
def _set_sources(self):
"""Push the Scatter data into the ColumnDataSource and
calculate the proper ranges."""
self._source = ColumnDataSource(self._data)
x_names, y_names = self._attr[::2], self._attr[1::2]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(
start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx)
)
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the marker glyphs to display the points.
Takes reference points from data loaded at the ColumnDataSource.
"""
duplets = list(chunk(self._attr, 2))
colors = cycle_colors(duplets, self.palette)
for i, duplet in enumerate(duplets, start=1):
renderer = make_scatter(
self._source, duplet[0], duplet[1], self.marker, colors[i - 1]
)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
def _adapt_values(self):
"""Prepare context before main show method is invoked.
Customize show preliminary actions by handling DataFrameGroupBy
values in order to create the series values and labels."""
# check if pandas is installed
if pd:
            # if it is we try to take advantage of its data structures
            # assuming we get a groupby object
if isinstance(self._values, pd.core.groupby.DataFrameGroupBy):
pdict = OrderedDict()
for i in self._values.groups.keys():
self._labels = self._values.get_group(i).columns
xname = self._values.get_group(i).columns[0]
yname = self._values.get_group(i).columns[1]
x = getattr(self._values.get_group(i), xname)
y = getattr(self._values.get_group(i), yname)
pdict[i] = np.array([x.values, y.values]).T
self._values = DataAdapter(pdict)
self._labels = self._values.keys()
else:
self._values = DataAdapter(self._values)
self._labels = self._values.keys()
else:
self._values = DataAdapter(self._values)
self._labels = self._values.keys()
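# Hypothetical sketch of the pandas path handled above (names are made up):
# each group of the DataFrameGroupBy must expose its x and y values as the
# first two columns, which _adapt_values converts into (x, y) arrays.
#
#     import pandas as pd
#     df = pd.DataFrame([(1, 2, 'py'), (2, 3, 'py'), (1, 12, 'pypy'), (2, 23, 'pypy')],
#                       columns=['x', 'y', 'lang'])
#     scatter = Scatter(df.groupby('lang'), title="Scatter", legend="top_left")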
| bsd-3-clause |
katkovalyova/CBB752Spring2017.github.io | StructAnal/4BMB_Urlj.py | 1 | 3309 | #!/usr/bin/python
__author__ = "Yekaterina Kovalyova"
__copyright__ = "Copyright 2017"
__credits__ = ["Yekaterina Kovalyova"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Yekaterina Kovalyova"
__email__ = "yekaterina.kovalyova@yale.edu"
### Usage: python 4BMB_Urlj.py -u1 <U_RLJ 1> -u2 <U_RLJ 2>
### Example: python 4BMB_Urlj.py -u1 4BMB_F19.txt -u2 4BMB_F19Y.txt
### Note: Structural comparison of protein to its mutant.
import argparse
import mpl_toolkits
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Protein Structure Analysis')
parser.add_argument('-u1', '--u1', help='U_RLJs of protein 1', required=True)
parser.add_argument('-u2', '--u2', help='U_RLJs of protein 2', required=True)
args = parser.parse_args()
# Plot repulsive Lennard Jones potential of residues as fxn of Chi_1 and Chi_2
# Hard-coded for the given files
def U_rlj(u1, u2):
#Process the given files with chi1, chi2, and energies
(wt_chi1, wt_chi2, wt_u) = processU(u1) #wildtype residue
(mut_chi1, mut_chi2, mut_u) = processU(u2) #mutant residue
#######################################################
###For wireframe plots, make 2D arrays of each value###
#######################################################
WT_chi1 = [] #2D array for WT chi1
WT_chi2 = [] #2D array for WT chi2
WT_u = [] #2D array for WT energy
MUT_chi1 = [] #2D array for mutant chi1
MUT_chi2 = [] #2D array for mutant chi2
MUT_u = [] #2D array for mutant energy
count = 0
#Actually make the arrays 2D and populate
#Hard-coded 72x72, for 72 chi1 angles x 72 chi2 angles
for i in range(0, 72):
WT_chi1.append([])
WT_chi2.append([])
WT_u.append([])
MUT_chi1.append([])
MUT_chi2.append([])
MUT_u.append([])
for j in range(0, 72):
WT_chi1[i].append(wt_chi1[count])
WT_chi2[i].append(wt_chi2[count])
WT_u[i].append(wt_u[count])
MUT_chi1[i].append(mut_chi1[count])
MUT_chi2[i].append(mut_chi2[count])
MUT_u[i].append(mut_u[count])
count += 1
###############################################
###Plot the energies as fxn of chi1 and chi2###
###############################################
fig = plt.figure()
plot = fig.add_subplot(111, projection='3d')
#blue = WT, red = mutant
plot.plot_wireframe(WT_chi1, WT_chi2, WT_u, rstride=1, cstride=1, color="blue")
plot.plot_wireframe(MUT_chi1, MUT_chi2, MUT_u, rstride=1, cstride=1, color="red")
plot.set_xlim3d(155, 185)
plot.set_ylim3d(40,100)
plot.set_zlim3d(0, 1)
plot.set_xlabel('Chi_1')
plot.set_ylabel('Chi_2')
plot.set_zlabel('U_RLJ')
plt.show()
# Get the chi1, chi2, and energies in separate arrays
def processU(Ufile):
chi1 = []
chi2 = []
U = []
f = open(Ufile, 'r')
f.readline()
for line in f:
tmp = line.split(" ")
chi1.append(int(tmp[0]))
chi2.append(int(tmp[1]))
U.append(float(tmp[2]))
f.close()
return (chi1, chi2, U)
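# Expected input format implied by processU (assumed: one header line, then
# space-separated "chi1 chi2 energy" rows, with the chi angles as integers):
#     chi1 chi2 U
#     155 40 0.8124
#     160 45 0.0231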
# Run programs
U_rlj(args.u1, args.u2) | mit |
RPGOne/scikit-learn | examples/linear_model/plot_huber_vs_ridge.py | 127 | 2206 | """
=======================================================
HuberRegressor vs Ridge on dataset with strong outliers
=======================================================
Fit Ridge and HuberRegressor on a dataset with outliers.
The example shows that the predictions in ridge are strongly influenced
by the outliers present in the dataset. The Huber regressor is less
influenced by the outliers since the model uses the linear loss for these.
As the parameter epsilon is increased for the Huber regressor, the decision
function approaches that of the ridge.
"""
# Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, Ridge
# Generate toy data.
rng = np.random.RandomState(0)
X, y = make_regression(n_samples=20, n_features=1, random_state=0, noise=4.0,
bias=100.0)
# Add four strong outliers to the dataset.
X_outliers = rng.normal(0, 0.5, size=(4, 1))
y_outliers = rng.normal(0, 2.0, size=4)
X_outliers[:2, :] += X.max() + X.mean() / 4.
X_outliers[2:, :] += X.min() - X.mean() / 4.
y_outliers[:2] += y.min() - y.mean() / 4.
y_outliers[2:] += y.max() + y.mean() / 4.
X = np.vstack((X, X_outliers))
y = np.concatenate((y, y_outliers))
plt.plot(X, y, 'b.')
# Fit the huber regressor over a series of epsilon values.
colors = ['r-', 'b-', 'y-', 'm-']
x = np.linspace(X.min(), X.max(), 7)
epsilon_values = [1.35, 1.5, 1.75, 1.9]
for k, epsilon in enumerate(epsilon_values):
huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100,
epsilon=epsilon)
huber.fit(X, y)
coef_ = huber.coef_ * x + huber.intercept_
plt.plot(x, coef_, colors[k], label="huber loss, %s" % epsilon)
# Fit a ridge regressor to compare it to huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True)
ridge.fit(X, y)
coef_ridge = ridge.coef_
coef_ = ridge.coef_ * x + ridge.intercept_
plt.plot(x, coef_, 'g-', label="ridge regression")
plt.title("Comparison of HuberRegressor vs Ridge")
plt.xlabel("X")
plt.ylabel("y")
plt.legend(loc=0)
plt.show()
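# Optional follow-up (assumes the fitted estimator exposes the boolean
# ``outliers_`` mask documented for HuberRegressor): count the samples the
# last fit treated as outliers.
#     print("samples flagged as outliers: %d" % huber.outliers_.sum())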
| bsd-3-clause |
bibsian/database-development | poplerGUI/logiclayer/class_metaverify.py | 1 | 3054 | #!/usr/bin/env python
import sys, os
from pandas import read_csv, read_sql
from poplerGUI.logiclayer.datalayer import config as orm
rootpath = os.path.dirname(os.path.dirname(os.path.dirname( __file__ )))
metapath = os.path.join(rootpath, 'Cataloged_Data_Current_sorted.csv')
__all__ = ['MetaVerifier']
class MetaVerifier(object):
"""
This is going to be a Singleton pattern i.e.
only one instance of this class will be created
in the whole program.
Keyword Arguments
-----------------
idnumber: integer
lterloc: string
metaurl: string
"""
_meta = read_csv(metapath, encoding='iso-8859-11')
def __init__(self, inputclsinstance):
self.idnumber = int(inputclsinstance.lnedentry['globalid'])
self.lterloc = inputclsinstance.lnedentry['lter']
self.metaurl = inputclsinstance.lnedentry['metaurl']
def verify_entries(self):
'''
This method is going to be used to verify
the user input into the program. Ensuring that
all the parameter supplied match a given record
in the metadata dataframe.
'''
if None in (self.idnumber, self.lterloc, self.metaurl):
raise AttributeError(
                'Not all attributes have been set. Please enter ' +
'the globalid, lter location, and metadata url.')
else:
try:
assert self.idnumber > 0
except:
raise AttributeError(
                    'Please enter the globalid number.')
try:
session = orm.Session()
global_check_q = session.query(
orm.project_table.proj_metadata_key).order_by(
orm.project_table.proj_metadata_key)
session.close()
global_check_df = read_sql(
global_check_q.statement,
global_check_q.session.bind)
uploaded_globals = global_check_df[
'proj_metadata_key'].values.tolist()
assert self.idnumber not in uploaded_globals
assert (self._meta.loc[
self._meta['global_id']== self.idnumber][
'global_id'] ==
self.idnumber).bool() is True
assert (self._meta.loc[
self._meta['global_id']== self.idnumber][
'lter'] ==
self.lterloc).bool() is True
assert (self._meta.loc[
self._meta['global_id']== self.idnumber][
'site_metadata']
== self.metaurl).bool() is True
return True
except Exception as e:
print(str(e))
raise LookupError(
"The verification attributes have not been set" +
" correctly. Or global_id is already present: " +
str(e)
)
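# Hypothetical usage sketch (values are made up; MetaVerifier only requires an
# object whose ``lnedentry`` dict provides 'globalid', 'lter' and 'metaurl',
# and the values must match a record in the cataloged CSV):
#
#     class _FakeInput(object):
#         lnedentry = {'globalid': '1', 'lter': 'SBC',
#                      'metaurl': 'http://example.org/meta'}
#     MetaVerifier(_FakeInput()).verify_entries()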
| mit |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
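# Small worked example (follows directly from the code above):
#     >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#     array([[1, 1],
#            [0, 2]])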
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper limited by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
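# For example, two equally sized labels give the natural-log entropy of a
# fair coin:
#     >>> entropy([0, 0, 1, 1])  # doctest: +ELLIPSIS
#     0.6931...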
| gpl-2.0 |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gdk.py | 69 | 15968 | from __future__ import division
import math
import os
import sys
import warnings
def fn_name(): return sys._getframe(1).f_code.co_name
import gobject
import gtk; gdk = gtk.gdk
import pango
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import numpy as npy
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D
from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
# Image formats that this backend supports - for FileChooser and print_figure()
IMAGE_FORMAT = ['eps', 'jpg', 'png', 'ps', 'svg'] + ['bmp'] # , 'raw', 'rgb']
IMAGE_FORMAT.sort()
IMAGE_FORMAT_DEFAULT = 'png'
class RendererGDK(RendererBase):
fontweights = {
100 : pango.WEIGHT_ULTRALIGHT,
200 : pango.WEIGHT_LIGHT,
300 : pango.WEIGHT_LIGHT,
400 : pango.WEIGHT_NORMAL,
500 : pango.WEIGHT_NORMAL,
600 : pango.WEIGHT_BOLD,
700 : pango.WEIGHT_BOLD,
800 : pango.WEIGHT_HEAVY,
900 : pango.WEIGHT_ULTRABOLD,
'ultralight' : pango.WEIGHT_ULTRALIGHT,
'light' : pango.WEIGHT_LIGHT,
'normal' : pango.WEIGHT_NORMAL,
'medium' : pango.WEIGHT_NORMAL,
'semibold' : pango.WEIGHT_BOLD,
'bold' : pango.WEIGHT_BOLD,
'heavy' : pango.WEIGHT_HEAVY,
'ultrabold' : pango.WEIGHT_ULTRABOLD,
'black' : pango.WEIGHT_ULTRABOLD,
}
# cache for efficiency, these must be at class, not instance level
layoutd = {} # a map from text prop tups to pango layouts
rotated = {} # a map from text prop tups to rotated text pixbufs
def __init__(self, gtkDA, dpi):
        # widget gtkDA is used for:
        #  '<widget>.create_pango_layout(s)'
        #  and for getting the colormap (see self._cmap below)
self.gtkDA = gtkDA
self.dpi = dpi
self._cmap = gtkDA.get_colormap()
self.mathtext_parser = MathTextParser("Agg")
def set_pixmap (self, pixmap):
self.gdkDrawable = pixmap
def set_width_height (self, width, height):
"""w,h is the figure w,h not the pixmap w,h
"""
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
transform = transform + Affine2D(). \
scale(1.0, -1.0).translate(0, self.height)
polygons = path.to_polygons(transform, self.width, self.height)
for polygon in polygons:
# draw_polygon won't take an arbitrary sequence -- it must be a list
# of tuples
polygon = [(int(round(x)), int(round(y))) for x, y in polygon]
if rgbFace is not None:
saveColor = gc.gdkGC.foreground
gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
gc.gdkGC.foreground = saveColor
if gc.gdkGC.line_width > 0:
self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
        if bbox is not None:
l,b,w,h = bbox.bounds
#rectangle = (int(l), self.height-int(b+h),
# int(w), int(h))
# set clip rect?
im.flipud_out()
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
has_alpha=True, bits_per_sample=8,
width=cols, height=rows)
array = pixbuf_get_pixels_array(pixbuf)
array[:,:,:] = image_array
gc = self.new_gc()
y = self.height-y-rows
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
# unflip
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
x, y = int(x), int(y)
        if x < 0 or y < 0: # window has shrunk and text is off the edge
return
if angle not in (0,90):
warnings.warn('backend_gdk: unable to draw text at angles ' +
'other than 0 or 90')
elif ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
elif angle==90:
self._draw_rotated_text(gc, x, y, s, prop, angle)
else:
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
if angle==90:
width, height = height, width
x -= width
y -= height
imw = font_image.get_width()
imh = font_image.get_height()
N = imw * imh
# a numpixels by num fonts array
Xall = npy.zeros((N,1), npy.uint8)
image_str = font_image.as_str()
Xall[:,0] = npy.fromstring(image_str, npy.uint8)
# get the max alpha at each pixel
Xs = npy.amax(Xall,axis=1)
# convert it to it's proper shape
Xs.shape = imh, imw
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
bits_per_sample=8, width=imw, height=imh)
array = pixbuf_get_pixels_array(pixbuf)
rgb = gc.get_rgb()
array[:,:,0]=int(rgb[0]*255)
array[:,:,1]=int(rgb[1]*255)
array[:,:,2]=int(rgb[2]*255)
array[:,:,3]=Xs
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
def _draw_rotated_text(self, gc, x, y, s, prop, angle):
"""
Draw the text rotated 90 degrees, other angles are not supported
"""
# this function (and its called functions) is a bottleneck
# Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
# wrapper functions
# GTK+ 2.6 pixbufs support rotation
gdrawable = self.gdkDrawable
ggc = gc.gdkGC
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
x = int(x-h)
y = int(y-w)
if x < 0 or y < 0: # window has shrunk and text is off the edge
return
key = (x,y,s,angle,hash(prop))
imageVert = self.rotated.get(key)
        if imageVert is not None:
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
return
imageBack = gdrawable.get_image(x, y, w, h)
imageVert = gdrawable.get_image(x, y, h, w)
imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
visual=gdrawable.get_visual(),
width=w, height=h)
        if imageFlip is None or imageBack is None or imageVert is None:
            warnings.warn("Could not render vertical text")
return
imageFlip.set_colormap(self._cmap)
for i in range(w):
for j in range(h):
imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
gdrawable.draw_layout(ggc, x, y-b, layout)
imageIn = gdrawable.get_image(x, y, w, h)
for i in range(w):
for j in range(h):
imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
self.rotated[key] = imageVert
def _get_pango_layout(self, s, prop):
"""
Create a pango layout instance for Text 's' with properties 'prop'.
Return - pango layout (from cache if already exists)
Note that pango assumes a logical DPI of 96
Ref: pango/fonts.c/pango_font_description_set_size() manual page
"""
# problem? - cache gets bigger and bigger, is never cleared out
# two (not one) layouts are created for every text item s (then they
# are cached) - why?
key = self.dpi, s, hash(prop)
value = self.layoutd.get(key)
        if value is not None:
return value
size = prop.get_size_in_points() * self.dpi / 96.0
size = round(size)
font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
font = pango.FontDescription(font_str)
# later - add fontweight to font_str
font.set_weight(self.fontweights[prop.get_weight()])
layout = self.gtkDA.create_pango_layout(s)
layout.set_font_description(font)
inkRect, logicalRect = layout.get_pixel_extents()
self.layoutd[key] = layout, inkRect, logicalRect
return layout, inkRect, logicalRect
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
return w, h+1, h + 1
def new_gc(self):
return GraphicsContextGDK(renderer=self)
def points_to_pixels(self, points):
return points/72.0 * self.dpi
class GraphicsContextGDK(GraphicsContextBase):
# a cache shared by all class instances
_cached = {} # map: rgb color -> gdk.Color
_joind = {
'bevel' : gdk.JOIN_BEVEL,
'miter' : gdk.JOIN_MITER,
'round' : gdk.JOIN_ROUND,
}
_capd = {
'butt' : gdk.CAP_BUTT,
'projecting' : gdk.CAP_PROJECTING,
'round' : gdk.CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
self._cmap = renderer._cmap
def rgb_to_gdk_color(self, rgb):
"""
rgb - an RGB tuple (three 0.0-1.0 values)
return an allocated gtk.gdk.Color
"""
try:
return self._cached[tuple(rgb)]
except KeyError:
color = self._cached[tuple(rgb)] = \
self._cmap.alloc_color(
int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
return color
#def set_antialiased(self, b):
# anti-aliasing is not supported by GDK
def set_capstyle(self, cs):
GraphicsContextBase.set_capstyle(self, cs)
self.gdkGC.cap_style = self._capd[self._capstyle]
def set_clip_rectangle(self, rectangle):
GraphicsContextBase.set_clip_rectangle(self, rectangle)
if rectangle is None:
return
l,b,w,h = rectangle.bounds
rectangle = (int(l), self.renderer.height-int(b+h)+1,
int(w), int(h))
#rectangle = (int(l), self.renderer.height-int(b+h),
# int(w+1), int(h+2))
self.gdkGC.set_clip_rectangle(rectangle)
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
        if dash_list is None:
self.gdkGC.line_style = gdk.LINE_SOLID
else:
pixels = self.renderer.points_to_pixels(npy.asarray(dash_list))
dl = [max(1, int(round(val))) for val in pixels]
self.gdkGC.set_dashes(dash_offset, dl)
self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
def set_foreground(self, fg, isRGB=False):
GraphicsContextBase.set_foreground(self, fg, isRGB)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_joinstyle(self, js):
GraphicsContextBase.set_joinstyle(self, js)
self.gdkGC.join_style = self._joind[self._joinstyle]
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
if w == 0:
self.gdkGC.line_width = 0
else:
pixels = self.renderer.points_to_pixels(w)
self.gdkGC.line_width = max(1, int(round(pixels)))
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGDK(thisFig)
manager = FigureManagerBase(canvas, num)
# equals:
#manager = FigureManagerBase (FigureCanvasGDK (Figure(*args, **kwargs),
# num)
return manager
class FigureCanvasGDK (FigureCanvasBase):
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
self._renderer_init()
def _renderer_init(self):
self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
def _render_figure(self, pixmap, width, height):
self._renderer.set_pixmap (pixmap)
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
width, height = self.get_width_height()
pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
pixbuf.save(filename, format)
def get_default_filetype(self):
return 'png'
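# Rough usage sketch (assumes a PyGTK-enabled environment; the backend is
# normally selected before pyplot is imported):
#     import matplotlib
#     matplotlib.use('GDK')
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.savefig('figure.png')  # rendered through FigureCanvasGDK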
| gpl-3.0 |
Achuth17/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 4 | 9540 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
"""Test stopping conditions of gradient descent."""
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
"""Test if the binary search finds Gaussians with desired perplexity."""
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
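    # Editor's note (illustrative comment, not in the original test): the
    # perplexity of row i is defined as exp(H(P_i)), the exponential of the
    # Shannon entropy in nats, which is exactly what the expression below
    # computes before averaging over rows.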
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
"""Test gradient of Kullback-Leibler divergence."""
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
"""Test trustworthiness score."""
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
"""X can be a sparse matrix."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
"""Nearest neighbors should be preserved approximately."""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
"""Early exaggeration factor must be >= 1."""
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
"""Number of gradient descent iterations must be at least 200."""
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
"""'init' must be 'pca' or 'random'."""
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
"""'metric' must be valid."""
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
"""Precomputed distance matrices must be square matrices."""
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
"""t-SNE should allow metrics that cannot be squared (issue #3526)."""
random_state = check_random_state(0)
tsne = TSNE(verbose=2, metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/tsa/vector_ar/util.py | 24 | 6383 | """
Miscellaneous utility code for VAR estimation
"""
from statsmodels.compat.python import range, string_types, asbytes
import numpy as np
import scipy.stats as stats
import scipy.linalg as L
import scipy.linalg.decomp as decomp
import statsmodels.tsa.tsatools as tsa
from scipy.linalg import cholesky
#-------------------------------------------------------------------------------
# Auxiliary functions for estimation
def get_var_endog(y, lags, trend='c', has_constant='skip'):
"""
Make predictor matrix for VAR(p) process
Z := (Z_0, ..., Z_T).T (T x Kp)
Z_t = [1 y_t y_{t-1} ... y_{t - p + 1}] (Kp x 1)
Ref: Lutkepohl p.70 (transposed)
has_constant can be 'raise', 'add', or 'skip'. See add_constant.
"""
nobs = len(y)
# Ravel C order, need to put in descending order
Z = np.array([y[t-lags : t][::-1].ravel() for t in range(lags, nobs)])
# Add constant, trend, etc.
if trend != 'nc':
Z = tsa.add_trend(Z, prepend=True, trend=trend,
has_constant=has_constant)
return Z
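# Editor's note: the function below is an illustrative sketch added for this
# edited copy, not part of the original statsmodels module.  It only checks the
# documented shape contract: with K variables and ``lags`` lags, Z has
# nobs - lags rows and trendorder + K * lags columns.
def _example_get_var_endog():
    y = np.arange(10.0).reshape(5, 2)        # 5 observations of a 2-variable series
    Z = get_var_endog(y, lags=2, trend='c')
    assert Z.shape == (3, 5)                 # (5 - 2, 1 + 2 * 2)
    return Z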
def get_trendorder(trend='c'):
# Handle constant, etc.
if trend == 'c':
trendorder = 1
elif trend == 'nc':
trendorder = 0
elif trend == 'ct':
trendorder = 2
elif trend == 'ctt':
trendorder = 3
return trendorder
def make_lag_names(names, lag_order, trendorder=1):
"""
Produce list of lag-variable names. Constant / trends go at the beginning
Examples
--------
>>> make_lag_names(['foo', 'bar'], 2, 1)
['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']
"""
lag_names = []
if isinstance(names, string_types):
names = [names]
# take care of lagged endogenous names
for i in range(1, lag_order + 1):
for name in names:
if not isinstance(name, string_types):
name = str(name) # will need consistent unicode handling
lag_names.append('L'+str(i)+'.'+name)
# handle the constant name
if trendorder != 0:
lag_names.insert(0, 'const')
if trendorder > 1:
lag_names.insert(0, 'trend')
if trendorder > 2:
lag_names.insert(0, 'trend**2')
return lag_names
def comp_matrix(coefs):
"""
    Return the companion matrix for the VAR(1) representation of a VAR(p) process
(companion form)
A = [A_1 A_2 ... A_p-1 A_p
I_K 0 0 0
0 I_K ... 0 0
0 ... I_K 0]
"""
p, k, k2 = coefs.shape
assert(k == k2)
kp = k * p
result = np.zeros((kp, kp))
result[:k] = np.concatenate(coefs, axis=1)
# Set I_K matrices
if p > 1:
result[np.arange(k, kp), np.arange(kp-k)] = 1
return result
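# Editor's note: illustrative sketch only, not part of the original module.
# For a VAR(2) in two variables the companion matrix is 4 x 4: the coefficient
# blocks [A_1 A_2] fill the first K rows, and an identity block below them
# shifts the lagged state down by one period.
def _example_comp_matrix():
    A1 = np.array([[0.5, 0.1], [0.0, 0.4]])
    A2 = np.array([[0.2, 0.0], [0.1, 0.1]])
    C = comp_matrix(np.array([A1, A2]))
    assert C.shape == (4, 4)
    assert np.allclose(C[2:, :2], np.eye(2))  # identity block under [A_1 A_2]
    return C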
#-------------------------------------------------------------------------------
# Miscellaneous stuff
def parse_lutkepohl_data(path): # pragma: no cover
"""
Parse data files from Lutkepohl (2005) book
Source for data files: www.jmulti.de
"""
from collections import deque
from datetime import datetime
import pandas
import pandas.core.datetools as dt
import re
    regex = re.compile(asbytes(r'<(.*) (\w)([\d]+)>.*'))
lines = deque(open(path, 'rb'))
to_skip = 0
while asbytes('*/') not in lines.popleft():
#while '*/' not in lines.popleft():
to_skip += 1
while True:
to_skip += 1
line = lines.popleft()
m = regex.match(line)
if m:
year, freq, start_point = m.groups()
break
data = np.genfromtxt(path, names=True, skip_header=to_skip+1)
n = len(data)
# generate the corresponding date range (using pandas for now)
start_point = int(start_point)
year = int(year)
offsets = {
asbytes('Q') : dt.BQuarterEnd(),
asbytes('M') : dt.BMonthEnd(),
asbytes('A') : dt.BYearEnd()
}
# create an instance
offset = offsets[freq]
inc = offset * (start_point - 1)
start_date = offset.rollforward(datetime(year, 1, 1)) + inc
offset = offsets[freq]
from pandas import DatetimeIndex # pylint: disable=E0611
date_range = DatetimeIndex(start=start_date, freq=offset, periods=n)
return data, date_range
def get_logdet(m):
from statsmodels.tools.linalg import logdet_symm
return logdet_symm(m)
get_logdet = np.deprecate(get_logdet,
"statsmodels.tsa.vector_ar.util.get_logdet",
"statsmodels.tools.linalg.logdet_symm",
"get_logdet is deprecated and will be removed in "
"0.8.0")
def norm_signif_level(alpha=0.05):
return stats.norm.ppf(1 - alpha / 2)
def acf_to_acorr(acf):
diag = np.diag(acf[0])
# numpy broadcasting sufficient
return acf / np.sqrt(np.outer(diag, diag))
def varsim(coefs, intercept, sig_u, steps=100, initvalues=None, seed=None):
"""
Simulate simple VAR(p) process with known coefficients, intercept, white
noise covariance, etc.
"""
if seed is not None:
np.random.seed(seed=seed)
from numpy.random import multivariate_normal as rmvnorm
p, k, k = coefs.shape
ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps)
result = np.zeros((steps, k))
result[p:] = intercept + ugen[p:]
# add in AR terms
for t in range(p, steps):
ygen = result[t]
for j in range(p):
ygen += np.dot(coefs[j], result[t-j-1])
return result
def get_index(lst, name):
try:
result = lst.index(name)
except Exception:
if not isinstance(name, int):
raise
result = name
return result
#method used repeatedly in Sims-Zha error bands
def eigval_decomp(sym_array):
"""
Returns
-------
W: array of eigenvectors
eigva: list of eigenvalues
k: largest eigenvector
"""
#check if symmetric, do not include shock period
eigva, W = decomp.eig(sym_array, left=True, right=False)
k = np.argmax(eigva)
return W, eigva, k
def vech(A):
"""
Simple vech operator
Returns
-------
vechvec: vector of all elements on and below diagonal
"""
    length = A.shape[1]
    vechvec = []
    for i in range(length):
        b = i
        while b < length:
            vechvec.append(A[b, i])
            b = b + 1
    vechvec = np.asarray(vechvec)
return vechvec
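# Editor's note: illustrative sketch only, not part of the original module.
# vech stacks the lower-triangular part (including the diagonal) column by
# column, so a symmetric 2 x 2 matrix maps to a length-3 vector.
def _example_vech():
    A = np.array([[1.0, 2.0],
                  [2.0, 3.0]])
    assert np.array_equal(vech(A), np.array([1.0, 2.0, 3.0]))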
| bsd-3-clause |
meduz/scikit-learn | examples/text/document_clustering.py | 32 | 8526 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a Python dict) to map the most
    frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random
initializations might be necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/ensemble/gradient_boosting.py | 25 | 71089 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight,
self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
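# Editor's note (illustrative, not from the original source): with the default
# scale of 1.0 the fitted prior is log(pos / neg), the log-odds of the
# positive-class frequency, which is the constant score minimizing the binomial
# deviance; ScaledLogOddsEstimator below halves it, the analogous constant
# minimizer for the exponential loss.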
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
        update the current predictions of the model. Traverses the tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
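    # Editor's note (illustrative, not from the original source): the negative
    # gradient returned above is the raw residual inside the +/- gamma band and
    # gamma * sign(residual) outside it, so large residuals are clipped and
    # outliers cannot dominate the fit.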
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
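    # Editor's note (illustrative, not from the original source): per sample the
    # quantile (pinball) loss is alpha * (y - pred) when y > pred and
    # (1 - alpha) * (pred - y) otherwise, so its negative gradient w.r.t. pred
    # is alpha on the first branch and -(1 - alpha) on the second, which is
    # exactly the array returned above.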
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
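    # Editor's note (illustrative, not from the original source): with y in
    # {0, 1} and pred the predicted log-odds, the per-sample negative
    # log-likelihood is log(1 + exp(pred)) - y * pred, i.e.
    # np.logaddexp(0, pred) - y * pred; the call above averages this quantity
    # and multiplies by 2 to report the deviance.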
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
            return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
                                           logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
if presort == True:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
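Examples
--------
A minimal usage sketch (illustrative only; the synthetic data and parameter
values are arbitrary, and exact predictions depend on ``random_state``)::
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> X, y = make_classification(random_state=0)
    >>> clf = GradientBoostingClassifier(n_estimators=50, random_state=0)
    >>> clf = clf.fit(X, y)
    >>> clf.predict(X[:2])  # doctest: +SKIP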
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
optional parameter *presort*.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
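Examples
--------
A minimal usage sketch (illustrative only; the synthetic data and parameter
values are arbitrary, and exact predictions depend on ``random_state``)::
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_regression(random_state=0)
    >>> est = GradientBoostingRegressor(n_estimators=50, random_state=0)
    >>> est = est.fit(X, y)
    >>> est.predict(X[:2])  # doctest: +SKIP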
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
| bsd-3-clause |
Eigenstate/msmbuilder | msmbuilder/tests/test_feature_selection.py | 5 | 2888 | import numpy as np
import pandas as pd
from sklearn.feature_selection import VarianceThreshold as VarianceThresholdR
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.feature_selection import FeatureSelector, VarianceThreshold, FeatureSlicer
from msmbuilder.featurizer import DihedralFeaturizer
FEATS = [
('phi', DihedralFeaturizer(types=['phi'], sincos=True)),
('psi', DihedralFeaturizer(types=['psi'], sincos=True)),
]
def test_featureselector_order():
fs1 = FeatureSelector(FEATS)
fs2 = FeatureSelector(FEATS[::-1])
assert fs1.which_feat == ['phi', 'psi']
assert fs2.which_feat == ['psi', 'phi']
def test_featureselector_selection():
trajectories = AlanineDipeptide().get_cached().trajectories
fs = FeatureSelector(FEATS, which_feat='phi')
assert fs.which_feat == ['phi']
y1 = fs.partial_transform(trajectories[0])
y_ref1 = FEATS[0][1].partial_transform(trajectories[0])
np.testing.assert_array_almost_equal(y_ref1, y1)
def test_featureselector_transform():
trajectories = AlanineDipeptide().get_cached().trajectories
fs = FeatureSelector(FEATS, which_feat='psi')
y1 = fs.transform(trajectories)
assert len(y1) == len(trajectories)
def test_variancethreshold_vs_sklearn():
trajectories = AlanineDipeptide().get_cached().trajectories
fs = FeatureSelector(FEATS)
vt = VarianceThreshold(0.1)
vtr = VarianceThresholdR(0.1)
y = fs.partial_transform(trajectories[0])
z1 = vt.fit_transform([y])[0]
z_ref1 = vtr.fit_transform(y)
np.testing.assert_array_almost_equal(z_ref1, z1)
def test_which_feat_types():
# trajectories = AlanineDipeptide().get_cached().trajectories
fs = FeatureSelector(FEATS, which_feat=('phi', 'psi'))
assert fs.which_feat == ['phi', 'psi']
fs = FeatureSelector(FEATS, which_feat=set(('phi', 'psi')))
assert fs.which_feat == ['phi', 'psi'] or fs.which_feat == ['psi', 'phi']
try:
fs = FeatureSelector(FEATS, which_feat={'phi':'psi'})
assert False
except TypeError:
pass
try:
fs = FeatureSelector(FEATS, which_feat=['phiii'])
assert False
except ValueError:
pass
def test_feature_slicer():
trajectories = AlanineDipeptide().get_cached().trajectories
f = DihedralFeaturizer()
fs = FeatureSlicer(f, indices=[0,1])
y1 = fs.transform(trajectories)
assert y1[0].shape[1] == 2
df = pd.DataFrame(fs.describe_features(trajectories[0]))
assert len(df) == 2
assert 'psi' not in df.featuregroup[0]
assert 'psi' not in df.featuregroup[1]
fs = FeatureSlicer(f, indices=[2,3])
y1 = fs.transform(trajectories)
assert y1[0].shape[1] == 2
df = pd.DataFrame(fs.describe_features(trajectories[0]))
assert len(df) == 2
assert 'phi' not in df.featuregroup[0]
assert 'phi' not in df.featuregroup[1]
| lgpl-2.1 |
hainm/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
mikhailpedrosa/sistmult | histograma.py | 1 | 10917 | # -*- coding: utf-8 -*-
__author__ = 'Mikhail Pedrosa <mikhail.jose@hotmail.com> and Tiago Alves <tiagoinformativa@gmail.com>'
__description__ = 'Image operations - histogram, point, logical and arithmetic operations, translation, rotation and scaling'
__version__ = '0.1'
__date__ = '15/09/2014'
from scipy import ndimage
from pylab import *
from PIL import Image
import PIL
import scipy
import matplotlib.pyplot as plt
import numpy as np
# Libraries used - matplotlib, numpy, pillow
#Python 2.7
def histogram(imagem1, imagem2):
matriz1 = np.asarray(imagem1.convert('L'))
matriz2 = np.asarray(imagem2.convert('L'))
# Display the histogram (grayscale image 1)
plt.hist(matriz1.flatten(), color='green')
plt.title('Histograma')
plt.xlabel('Pixel')
plt.ylabel('Ocorrencias')
plt.show()
# Display the histogram (grayscale image 2)
plt.hist(matriz2.flatten(), color='red')
plt.title('Histograma')
plt.xlabel('Pixel')
plt.ylabel('Ocorrencias')
plt.show()
def rotacao(imagem1):
imagem1 = Image.open("foto1.jpg")
# Rotate the image by 90 and 180 degrees
rot_90 = imagem1.transpose(Image.ROTATE_90)
rot_90 = rot_90.save("rotacao_90.png")
rot_180 = imagem1.transpose(Image.ROTATE_180)
rot_180 = rot_180.save("rotacao_180.png")
return rot_90, rot_180
def espelhamento(imagem1):
imagem1 = Image.open("foto1.jpg")
left_right = imagem1.transpose(Image.FLIP_LEFT_RIGHT)
left_right = left_right.save("espelhamento_left_right.png")
top = imagem1.transpose(Image.FLIP_TOP_BOTTOM)
top = top.save("espelhamento_top.png")
return left_right, top
def soma(imagem1, imagem2):
# Image size (width and height)
im1_W = imagem1.size[0]
im1_H = imagem2.size[1]
for y in xrange(0, im1_H):
for x in xrange(0, im1_W):
xy = (x, y)
# Get the pixels of images 1 and 2
red_im1, green_im1, blue_im1 = imagem1.getpixel(xy)
red_im2, green_im2, blue_im2 = imagem2.getpixel(xy)
# Addition operation
red_im1 += red_im2
green_im1 += green_im2
blue_im1 += blue_im2
# Write the combined pixels back to image 1
imagem1.putpixel((x, y), (red_im1, green_im1, blue_im1))
return imagem1.save('soma.png')
def subt(imagem1, imagem2):
# Image size (width and height)
im1_W = imagem1.size[0]
im1_H = imagem2.size[1]
for y in xrange(0, im1_H):
for x in xrange(0, im1_W):
xy = (x, y)
# Get the pixels of images 1 and 2
red_im1, green_im1, blue_im1 = imagem1.getpixel(xy)
red_im2, green_im2, blue_im2 = imagem2.getpixel(xy)
# Subtraction operation
red_im1 -= red_im2
green_im1 -= green_im2
blue_im1 -= blue_im2
# Write the combined pixels back to image 1
imagem1.putpixel((x, y), (red_im1, green_im1, blue_im1))
return imagem1.save('sub.png')
def mult(imagem1, imagem2):
# Image size (width and height)
im1_W = imagem1.size[0]
im1_H = imagem2.size[1]
for y in xrange(0, im1_H):
for x in xrange(0, im1_W):
xy = (x, y)
# Get the pixels of images 1 and 2
red_im1, green_im1, blue_im1 = imagem1.getpixel(xy)
red_im2, green_im2, blue_im2 = imagem2.getpixel(xy)
# Multiplication operation
red_im1 *= red_im2
green_im1 *= green_im2
blue_im1 *= blue_im2
# Write the combined pixels back to image 1
imagem1.putpixel((x, y), (red_im1, green_im1, blue_im1))
return imagem1.save('mult.png')
def div(imagem1, imagem2):
# Image size (width and height)
im1_W = imagem1.size[0]
im1_H = imagem2.size[1]
for y in xrange(0, im1_H):
for x in xrange(0, im1_W):
xy = (x, y)
# Get the pixels of images 1 and 2
red_im1, green_im1, blue_im1 = imagem1.getpixel(xy)
red_im2, green_im2, blue_im2 = imagem2.getpixel(xy)
# Division operation
try:
red_im1 /= red_im2
green_im1 /= green_im2
blue_im1 /= blue_im2
except ZeroDivisionError:
print("You can't divide by zero.")
# Write the combined pixels back to image 1
imagem1.putpixel((x, y), (red_im1, green_im1, blue_im1))
return imagem1.save('div.png')
def E(imagem1, imagem2):
# Image size (width and height)
im1_W = imagem1.size[0]
im1_H = imagem2.size[1]
for y in xrange(0, im1_H):
for x in xrange(0, im1_W):
xy = (x, y)
# Get the pixels of images 1 and 2
red_im1, green_im1, blue_im1 = imagem1.getpixel(xy)
red_im2, green_im2, blue_im2 = imagem2.getpixel(xy)
# AND operation (assign the result; the original expressions discarded it)
red_im1 = red_im1 and red_im2
green_im1 = green_im1 and green_im2
blue_im1 = blue_im1 and blue_im2
# Write the combined pixels back to image 1
imagem1.putpixel((x, y), (red_im1, green_im1, blue_im1))
return imagem1.save('and.png')
def OU(imagem1, imagem2):
# Image size (width and height)
im1_W = imagem1.size[0]
im1_H = imagem2.size[1]
for y in xrange(0, im1_H):
for x in xrange(0, im1_W):
xy = (x, y)
# Get the pixels of images 1 and 2
red_im1, green_im1, blue_im1 = imagem1.getpixel(xy)
red_im2, green_im2, blue_im2 = imagem2.getpixel(xy)
# OR operation (assign the result; the original expressions discarded it)
red_im1 = red_im1 or red_im2
green_im1 = green_im1 or green_im2
blue_im1 = blue_im1 or blue_im2
# Write the combined pixels back to image 1
imagem1.putpixel((x, y), (red_im1, green_im1, blue_im1))
return imagem1.save('or.png')
def XOR(imagem1, imagem2):
# Image size (width and height)
im1_W = imagem1.size[0]
im1_H = imagem2.size[1]
for y in xrange(0, im1_H):
for x in xrange(0, im1_W):
xy = (x, y)
# Get the pixels of images 1 and 2
red_im1, green_im1, blue_im1 = imagem1.getpixel(xy)
red_im2, green_im2, blue_im2 = imagem2.getpixel(xy)
# XOR operation (assign the result; the original expressions discarded it)
red_im1 ^= red_im2
green_im1 ^= green_im2
blue_im1 ^= blue_im2
# Write the combined pixels back to image 1
imagem1.putpixel((x, y), (red_im1, green_im1, blue_im1))
return imagem1.save('xor.png')
def escala(imagem1):
# Target base width
basewidth=300
# Scale factor (target width / original width)
wpercent = (basewidth/float(imagem1.size[0]))
# New height, preserving the aspect ratio
hsize = int((float(imagem1.size[1])*float(wpercent)))
imagem_menor = imagem1.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
lar=1500
alt = 1000
imagem_maior = imagem1.resize((lar,alt))
return imagem_menor.save('escala_menor.png'), imagem_maior.save("escala_maior.png")
def translacao():
imagem = Image.open("foto.png")
# Image size - width and height
lar = imagem.size[0]
alt = imagem.size[1]
x_loc = 100
y_loc = 150
imagem_matriz = np.asarray(imagem.convert('RGB'))
for x in range(lar):
for y in range(alt):
if x >= x_loc and y >= y_loc:
yo = x - x_loc
xo = y - y_loc
imagem.putpixel((x,y), (imagem_matriz[xo,yo][0],imagem_matriz[xo,yo][1],imagem_matriz[xo,yo][2]))
else:
imagem.putpixel((x,y), (255, 255, 255, 255))
return imagem.save("translate.png")
def desfoque_alisamento(imagem):
blurred_lena = ndimage.gaussian_filter(imagem, sigma=3)
very_blurred = ndimage.gaussian_filter(imagem, sigma=5)
local_mean = ndimage.uniform_filter(imagem, size=11)
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.imshow(blurred_lena, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(132)
plt.imshow(very_blurred, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(133)
plt.imshow(local_mean, cmap=plt.cm.gray)
plt.axis('off')
plt.subplots_adjust(wspace=0, hspace=0., top=0.99, bottom=0.01,
left=0.01, right=0.99)
plt.show()
def afinacao(imagem):
blurred_imagem = ndimage.gaussian_filter(imagem, 3)
filter_blurred_imagem = ndimage.gaussian_filter(blurred_imagem, 1)
alpha = 30
sharpened = blurred_imagem + alpha * (blurred_imagem - filter_blurred_imagem)
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.imshow(imagem, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(132)
plt.imshow(blurred_imagem, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(133)
plt.imshow(sharpened, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
def suavizacao():
imagem = scipy.misc.lena()
imagem = imagem[230:290, 220:320]
noisy = imagem + 0.4*imagem.std()*np.random.random(imagem.shape)
gauss_denoised = ndimage.gaussian_filter(noisy, 2)
med_denoised = ndimage.median_filter(noisy, 3)
plt.figure(figsize=(12,2.8))
plt.subplot(131)
plt.imshow(noisy, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('Noisy', fontsize=20)
plt.subplot(132)
plt.imshow(gauss_denoised, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('Gaussian filter', fontsize=20)
plt.subplot(133)
plt.imshow(med_denoised, cmap=plt.cm.gray, vmin=40, vmax=220)
plt.axis('off')
plt.title('Median filter', fontsize=20)
plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0, left=0,
right=1)
plt.show()
if __name__ == '__main__':
# Read the images and convert them to RGB
imagem1 = Image.open("foto1.jpg").convert('RGB')
imagem2 = Image.open("foto2.jpg").convert('RGB')
# Histogram
#histogram(imagem1, imagem2)
# Point operations
# Arithmetic operations (addition, subtraction, multiplication, division)
#soma(imagem1, imagem2)
#subt(imagem1, imagem2)
#mult(imagem1, imagem2)
#div(imagem1, imagem2)
# Logical operations (AND, OR, XOR)
#E(imagem1, imagem2)
#OU(imagem1, imagem2)
#XOR(imagem1, imagem2)
# Local operations
#desfoque_alisamento(imagem1)
afinacao(imagem1)
suavizacao()
# Geometric transformations
#translacao()
# Rotation
#rotacao(imagem1)
# Scaling
#escala(imagem1)
# Mirroring / reflection
#espelhamento(imagem1)
| gpl-2.0 |
0asa/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
Diegojnb/JdeRobot | src/drivers/MAVLinkServer/MAVProxy/modules/lib/grapher.py | 6 | 13457 | #!/usr/bin/env python
'''
core library for graphing in mavexplorer
'''
import sys, struct, time, os, datetime
import math, re
import matplotlib
from math import *
from pymavlink.mavextra import *
import pylab
from pymavlink import mavutil
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey', 'yellow', 'brown', 'darkcyan',
'cornflowerblue', 'darkmagenta', 'deeppink', 'darkred']
colourmap = {
'apm' : {
'MANUAL' : (1.0, 0, 0),
'AUTO' : ( 0, 1.0, 0),
'LOITER' : ( 0, 0, 1.0),
'FBWA' : (1.0, 0.5, 0),
'RTL' : ( 1, 0, 0.5),
'STABILIZE' : (0.5, 1.0, 0),
'LAND' : ( 0, 1.0, 0.5),
'STEERING' : (0.5, 0, 1.0),
'HOLD' : ( 0, 0.5, 1.0),
'ALT_HOLD' : (1.0, 0.5, 0.5),
'CIRCLE' : (0.5, 1.0, 0.5),
'POSITION' : (1.0, 0.0, 1.0),
'GUIDED' : (0.5, 0.5, 1.0),
'ACRO' : (1.0, 1.0, 0),
'CRUISE' : ( 0, 1.0, 1.0)
},
'px4' : {
'MANUAL' : (1.0, 0, 0),
'SEATBELT' : ( 0.5, 0.5, 0),
'EASY' : ( 0, 1.0, 0),
'AUTO' : ( 0, 0, 1.0),
'UNKNOWN' : ( 1.0, 1.0, 1.0)
}
}
edge_colour = (0.1, 0.1, 0.1)
class MavGraph(object):
def __init__(self):
self.lowest_x = None
self.highest_x = None
self.mav_list = []
self.fields = []
self.condition = None
self.xaxis = None
self.marker = None
self.linestyle = None
self.flightmode = None
self.legend = 'upper left'
self.legend2 = 'upper right'
self.timeshift = 0
self.labels = None
self.multi = False
def add_field(self, field):
'''add another field to plot'''
self.fields.append(field)
def add_mav(self, mav):
'''add another data source to plot'''
self.mav_list.append(mav)
def set_condition(self, condition):
'''set graph condition'''
self.condition = condition
def set_xaxis(self, xaxis):
'''set graph xaxis'''
self.xaxis = xaxis
def set_marker(self, marker):
'''set graph marker'''
self.marker = marker
def set_timeshift(self, timeshift):
'''set graph timeshift'''
self.timeshift = timeshift
def set_legend2(self, legend2):
'''set graph legend2'''
self.legend2 = legend2
def set_legend(self, legend):
'''set graph legend'''
self.legend = legend
def set_flightmode(self, flightmode):
'''set graph flightmode'''
self.flightmode = flightmode
def set_linestyle(self, linestyle):
'''set graph linestyle'''
self.linestyle = linestyle
def set_multi(self, multi):
'''set multiple graph option'''
self.multi = multi
def make_format(self, current, other):
# current and other are axes
def format_coord(x, y):
# x, y are data coordinates
# convert to display coords
display_coord = current.transData.transform((x,y))
inv = other.transData.inverted()
# convert back to data coords with respect to ax
ax_coord = inv.transform(display_coord)
xstr = self.formatter(x)
y2 = ax_coord[1]
if self.xaxis:
return ('x=%.3f Left=%.3f Right=%.3f' % (x, y2, y))
else:
return ('x=%s Left=%.3f Right=%.3f' % (xstr, y2, y))
return format_coord
def plotit(self, x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if self.lowest_x is None or x[i][0] < self.lowest_x:
self.lowest_x = x[i][0]
if self.highest_x is None or x[i][-1] > self.highest_x:
self.highest_x = x[i][-1]
if self.highest_x is None or self.lowest_x is None:
return
xrange = self.highest_x - self.lowest_x
xrange *= 24 * 60 * 60
self.formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
if not self.xaxis:
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(self.formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if self.axes[i] == 2:
if ax2 is None:
ax2 = ax1.twinx()
ax2.format_coord = self.make_format(ax2, ax1)
ax = ax2
if not self.xaxis:
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(self.formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
if self.xaxis:
if self.marker is not None:
marker = self.marker
else:
marker = '+'
if self.linestyle is not None:
linestyle = self.linestyle
else:
linestyle = 'None'
ax.plot(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker)
else:
if self.marker is not None:
marker = self.marker
else:
marker = 'None'
if self.linestyle is not None:
linestyle = self.linestyle
else:
linestyle = '-'
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker, tz=None)
empty = False
if self.flightmode is not None:
for i in range(len(self.modes)-1):
c = colourmap[self.flightmode].get(self.modes[i][1], edge_colour)
ax1.axvspan(self.modes[i][0], self.modes[i+1][0], fc=c, ec=edge_colour, alpha=0.1)
c = colourmap[self.flightmode].get(self.modes[-1][1], edge_colour)
ax1.axvspan(self.modes[-1][0], ax1.get_xlim()[1], fc=c, ec=edge_colour, alpha=0.1)
if ax1_labels != []:
ax1.legend(ax1_labels,loc=self.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=self.legend2)
if empty:
print("No data to graph")
return
def add_data(self, t, msg, vars, flightmode):
'''add some data'''
mtype = msg.get_type()
if self.flightmode is not None and (len(self.modes) == 0 or self.modes[-1][1] != flightmode):
self.modes.append((t, flightmode))
for i in range(0, len(self.fields)):
if mtype not in self.field_types[i]:
continue
f = self.fields[i]
if f.endswith(":2"):
self.axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
self.first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
if self.xaxis is None:
xv = t
else:
xv = mavutil.evaluate_expression(self.xaxis, vars)
if xv is None:
continue
self.y[i].append(v)
self.x[i].append(xv)
def process_mav(self, mlog, timeshift):
'''process one file'''
self.vars = {}
while True:
msg = mlog.recv_msg()
if msg is None:
break
if msg.get_type() not in self.msg_types:
continue
if self.condition:
if not mavutil.evaluate_condition(self.condition, mlog.messages):
continue
tdays = matplotlib.dates.date2num(datetime.datetime.fromtimestamp(msg._timestamp+timeshift))
self.add_data(tdays, msg, mlog.messages, mlog.flightmode)
def process(self, block=True):
'''process and display graph'''
self.msg_types = set()
self.multiplier = []
self.field_types = []
# work out msg types we are interested in
self.x = []
self.y = []
self.modes = []
self.axes = []
self.first_only = []
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in self.fields:
caps = set(re.findall(re_caps, f))
self.msg_types = self.msg_types.union(caps)
self.field_types.append(caps)
self.y.append([])
self.x.append([])
self.axes.append(1)
self.first_only.append(False)
if self.labels is not None:
labels = self.labels.split(',')
if len(labels) != len(self.fields)*len(self.mav_list):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(self.fields)*len(self.mav_list)))
return
else:
labels = None
timeshift = self.timeshift
for fi in range(0, len(self.mav_list)):
mlog = self.mav_list[fi]
self.process_mav(mlog, timeshift)
timeshift = 0
for i in range(0, len(self.x)):
if self.first_only[i] and fi != 0:
self.x[i] = []
self.y[i] = []
if labels:
lab = labels[fi*len(self.fields):(fi+1)*len(self.fields)]
else:
lab = self.fields[:]
if self.multi:
col = colors[:]
else:
col = colors[fi*len(self.fields):]
self.plotit(self.x, self.y, lab, colors=col)
for i in range(0, len(self.x)):
self.x[i] = []
self.y[i] = []
pylab.draw()
def show(self, block=True):
'''show graph'''
pylab.show(block=block)
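# Illustrative programmatic usage (a sketch; the log file name and message
# field below are assumptions, not part of this module):
#   mg = MavGraph()
#   mg.add_mav(mavutil.mavlink_connection('flight.tlog'))
#   mg.add_field('ATT.Roll')
#   mg.process()
#   mg.show()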
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--planner", action='store_true', help="use planner file format")
parser.add_argument("--condition", default=None, help="select packets by a condition")
parser.add_argument("--labels", default=None, help="comma separated field labels")
parser.add_argument("--legend", default='upper left', help="default legend position")
parser.add_argument("--legend2", default='upper right', help="default legend2 position")
parser.add_argument("--marker", default=None, help="point marker")
parser.add_argument("--linestyle", default=None, help="line style")
parser.add_argument("--xaxis", default=None, help="X axis expression")
parser.add_argument("--multi", action='store_true', help="multiple files with same colours")
parser.add_argument("--zero-time-base", action='store_true', help="use Z time base for DF logs")
parser.add_argument("--flightmode", default=None,
help="Choose the plot background according to the active flight mode of the specified type, e.g. --flightmode=apm for ArduPilot or --flightmode=px4 for PX4 stack logs. Cannot be specified with --xaxis.")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("--output", default=None, help="provide an output format")
parser.add_argument("--timeshift", type=float, default=0, help="shift time on first graph in seconds")
parser.add_argument("logs_fields", metavar="<LOG or FIELD>", nargs="+")
args = parser.parse_args()
mg = MavGraph()
filenames = []
for f in args.logs_fields:
if os.path.exists(f):
mlog = mavutil.mavlink_connection(f, notimestamps=args.notimestamps,
zero_time_base=args.zero_time_base,
dialect=args.dialect)
mg.add_mav(mlog)
else:
mg.add_field(f)
mg.set_condition(args.condition)
mg.set_xaxis(args.xaxis)
mg.set_marker(args.marker)
mg.set_legend(args.legend)
mg.set_legend2(args.legend2)
mg.set_multi(args.multi)
mg.set_flightmode(args.flightmode)
mg.process()
mg.show()
| gpl-3.0 |
commaai/openpilot | tools/replay/lib/ui_helpers.py | 1 | 8366 | import itertools
from typing import Any, Dict, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pygame # pylint: disable=import-error
from common.transformations.camera import (eon_f_frame_size, eon_f_focal_length,
tici_f_frame_size, tici_f_focal_length,
get_view_frame_from_calib_frame)
from selfdrive.config import UIParams as UP
from selfdrive.config import RADAR_TO_CAMERA
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
_FULL_FRAME_SIZE = {
}
_BB_TO_FULL_FRAME = {}
_FULL_FRAME_TO_BB = {}
_INTRINSICS = {}
cams = [(eon_f_frame_size[0], eon_f_frame_size[1], eon_f_focal_length),
(tici_f_frame_size[0], tici_f_frame_size[1], tici_f_focal_length)]
for width, height, focal in cams:
sz = width * height
_BB_SCALE = width / 640.
_BB_TO_FULL_FRAME[sz] = np.asarray([
[_BB_SCALE, 0., 0.],
[0., _BB_SCALE, 0.],
[0., 0., 1.]])
_FULL_FRAME_TO_BB[sz] = np.linalg.inv(_BB_TO_FULL_FRAME[sz])
_FULL_FRAME_SIZE[sz] = (width, height)
_INTRINSICS[sz] = np.array([
[focal, 0., width / 2.],
[0., focal, height / 2.],
[0., 0., 1.]])
METER_WIDTH = 20
class Calibration:
def __init__(self, num_px, rpy, intrinsic):
self.intrinsic = intrinsic
self.extrinsics_matrix = get_view_frame_from_calib_frame(rpy[0], rpy[1], rpy[2], 0.0)[:,:3]
self.zoom = _BB_TO_FULL_FRAME[num_px][0, 0]
def car_space_to_ff(self, x, y, z):
car_space_projective = np.column_stack((x, y, z)).T
ep = self.extrinsics_matrix.dot(car_space_projective)
kep = self.intrinsic.dot(ep)
return (kep[:-1, :] / kep[-1, :]).T
def car_space_to_bb(self, x, y, z):
pts = self.car_space_to_ff(x, y, z)
return pts / self.zoom
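# Illustrative sketch (assumptions, not part of the original module): project a
# point 10 m ahead of the car into the 640-wide camera view, using a zeroed
# roll/pitch/yaw calibration and the EON intrinsics defined above.
#   num_px = eon_f_frame_size[0] * eon_f_frame_size[1]
#   calib = Calibration(num_px, rpy=[0.0, 0.0, 0.0], intrinsic=_INTRINSICS[num_px])
#   calib.car_space_to_bb([10.0], [0.0], [0.0])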
_COLOR_CACHE : Dict[Tuple[int, int, int], Any] = {}
def find_color(lidar_surface, color):
if color in _COLOR_CACHE:
return _COLOR_CACHE[color]
tcolor = 0
ret = 255
for x in lidar_surface.get_palette():
if x[0:3] == color:
ret = tcolor
break
tcolor += 1
_COLOR_CACHE[color] = ret
return ret
def to_topdown_pt(y, x):
px, py = x * UP.lidar_zoom + UP.lidar_car_x, -y * UP.lidar_zoom + UP.lidar_car_y
if px > 0 and py > 0 and px < UP.lidar_x and py < UP.lidar_y:
return int(px), int(py)
return -1, -1
def draw_path(path, color, img, calibration, top_down, lid_color=None, z_off=0):
x, y, z = np.asarray(path.x), np.asarray(path.y), np.asarray(path.z) + z_off
pts = calibration.car_space_to_bb(x, y, z)
pts = np.round(pts).astype(int)
# draw lidar path point on lidar
# find color in 8 bit
if lid_color is not None and top_down is not None:
tcolor = find_color(top_down[0], lid_color)
for i in range(len(x)):
px, py = to_topdown_pt(x[i], y[i])
if px != -1:
top_down[1][px, py] = tcolor
height, width = img.shape[:2]
for x, y in pts:
if 1 < x < width - 1 and 1 < y < height - 1:
for a, b in itertools.permutations([-1, 0, -1], 2):
img[y + a, x + b] = color
def init_plots(arr, name_to_arr_idx, plot_xlims, plot_ylims, plot_names, plot_colors, plot_styles, bigplots=False):
color_palette = { "r": (1, 0, 0),
"g": (0, 1, 0),
"b": (0, 0, 1),
"k": (0, 0, 0),
"y": (1, 1, 0),
"p": (0, 1, 1),
"m": (1, 0, 1)}
if bigplots:
fig = plt.figure(figsize=(6.4, 7.0))
else:
fig = plt.figure()
fig.set_facecolor((0.2, 0.2, 0.2))
axs = []
for pn in range(len(plot_ylims)):
ax = fig.add_subplot(len(plot_ylims), 1, len(axs)+1)
ax.set_xlim(plot_xlims[pn][0], plot_xlims[pn][1])
ax.set_ylim(plot_ylims[pn][0], plot_ylims[pn][1])
ax.patch.set_facecolor((0.4, 0.4, 0.4))
axs.append(ax)
plots, idxs, plot_select = [], [], []
for i, pl_list in enumerate(plot_names):
for j, item in enumerate(pl_list):
plot, = axs[i].plot(arr[:, name_to_arr_idx[item]],
label=item,
color=color_palette[plot_colors[i][j]],
linestyle=plot_styles[i][j])
plots.append(plot)
idxs.append(name_to_arr_idx[item])
plot_select.append(i)
axs[i].set_title(", ".join("%s (%s)" % (nm, cl)
for (nm, cl) in zip(pl_list, plot_colors[i])), fontsize=10)
axs[i].tick_params(axis="x", colors="white")
axs[i].tick_params(axis="y", colors="white")
axs[i].title.set_color("white")
if i < len(plot_ylims) - 1:
axs[i].set_xticks([])
fig.canvas.draw()
renderer = fig.canvas.get_renderer()
if matplotlib.get_backend() == "MacOSX":
fig.draw(renderer)
def draw_plots(arr):
for ax in axs:
ax.draw_artist(ax.patch)
for i in range(len(plots)):
plots[i].set_ydata(arr[:, idxs[i]])
axs[plot_select[i]].draw_artist(plots[i])
if matplotlib.get_backend() == "QT4Agg":
fig.canvas.update()
fig.canvas.flush_events()
raw_data = renderer.tostring_rgb()
x, y = fig.canvas.get_width_height()
# Handle 2x scaling
if len(raw_data) == 4 * x * y * 3:
plot_surface = pygame.image.frombuffer(raw_data, (2*x, 2*y), "RGB").convert()
plot_surface = pygame.transform.scale(plot_surface, (x, y))
else:
plot_surface = pygame.image.frombuffer(raw_data, fig.canvas.get_width_height(), "RGB").convert()
return plot_surface
return draw_plots
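# Illustrative call (a sketch; the names, limits and array shape are
# assumptions). init_plots expects parallel lists describing each subplot and
# returns a draw function that renders a pygame surface (a pygame display must
# already be initialised):
#   arr = np.zeros((100, 2))
#   draw = init_plots(arr, {"speed": 0, "accel": 1},
#                     plot_xlims=[(0, 100)], plot_ylims=[(0, 40)],
#                     plot_names=[["speed", "accel"]],
#                     plot_colors=[["b", "r"]], plot_styles=[["-", "-"]])
#   surface = draw(arr)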
def pygame_modules_have_loaded():
return pygame.display.get_init() and pygame.font.get_init()
def plot_model(m, img, calibration, top_down):
if calibration is None or top_down is None:
return
for lead in m.leads:
if lead.prob < 0.5:
continue
x, y, _, _ = lead.xyva
x_std, _, _, _ = lead.xyvaStd
x -= RADAR_TO_CAMERA
_, py_top = to_topdown_pt(x + x_std, y)
px, py_bottom = to_topdown_pt(x - x_std, y)
top_down[1][int(round(px - 4)):int(round(px + 4)), py_top:py_bottom] = find_color(top_down[0], YELLOW)
for path, prob, _ in zip(m.laneLines, m.laneLineProbs, m.laneLineStds):
color = (0, int(255 * prob), 0)
draw_path(path, color, img, calibration, top_down, YELLOW)
for edge, std in zip(m.roadEdges, m.roadEdgeStds):
prob = max(1 - std, 0)
color = (int(255 * prob), 0, 0)
draw_path(edge, color, img, calibration, top_down, RED)
color = (255, 0, 0)
draw_path(m.position, color, img, calibration, top_down, RED, 1.22)
def plot_lead(rs, top_down):
for lead in [rs.leadOne, rs.leadTwo]:
if not lead.status:
continue
x = lead.dRel
px_left, py = to_topdown_pt(x, -10)
px_right, _ = to_topdown_pt(x, 10)
top_down[1][px_left:px_right, py] = find_color(top_down[0], RED)
def maybe_update_radar_points(lt, lid_overlay):
ar_pts = []
if lt is not None:
ar_pts = {}
for track in lt:
ar_pts[track.trackId] = [track.dRel, track.yRel, track.vRel, track.aRel, track.oncoming, track.stationary]
for ids, pt in ar_pts.items():
# negative here since radar is left positive
px, py = to_topdown_pt(pt[0], -pt[1])
if px != -1:
if pt[-1]:
color = 240
elif pt[-2]:
color = 230
else:
color = 255
if int(ids) == 1:
lid_overlay[px - 2:px + 2, py - 10:py + 10] = 100
else:
lid_overlay[px - 2:px + 2, py - 2:py + 2] = color
def get_blank_lid_overlay(UP):
lid_overlay = np.zeros((UP.lidar_x, UP.lidar_y), 'uint8')
# Draw the car.
lid_overlay[int(round(UP.lidar_car_x - UP.car_hwidth)):int(
round(UP.lidar_car_x + UP.car_hwidth)), int(round(UP.lidar_car_y -
UP.car_front))] = UP.car_color
lid_overlay[int(round(UP.lidar_car_x - UP.car_hwidth)):int(
round(UP.lidar_car_x + UP.car_hwidth)), int(round(UP.lidar_car_y +
UP.car_back))] = UP.car_color
lid_overlay[int(round(UP.lidar_car_x - UP.car_hwidth)), int(
round(UP.lidar_car_y - UP.car_front)):int(round(
UP.lidar_car_y + UP.car_back))] = UP.car_color
lid_overlay[int(round(UP.lidar_car_x + UP.car_hwidth)), int(
round(UP.lidar_car_y - UP.car_front)):int(round(
UP.lidar_car_y + UP.car_back))] = UP.car_color
return lid_overlay
| mit |
hanteng/pyCountrySize | pyCountrySize/31_construct_size_database.py | 1 | 1384 | # -*- coding: utf-8 -*-
# Discrimination is boundless; turn back and the shore is there. Keys rise and fall; feelings real and illusory.
import ConfigParser
Config = ConfigParser.ConfigParser()
Config.read("config.ini")
dir_source = Config.get("Directory", 'source')
dir_inprocess = Config.get("Directory",'inprocess')
dir_outcome = Config.get("Directory",'outcome')
fn_suffix = Config.get("Filename",'suffix')
import os.path, glob
import pandas as pd
import numpy as np
filename_list=[os.path.normpath(x) for x in glob.glob(os.path.join(dir_outcome, "size*."+fn_suffix))]
#filename_list.reverse()
## Basis: >>> wp.items from size_IPop_LP_PPPGDP.pkl
# Index([u'IPop', u'LP', u'PPPGDP'], dtype='object')
init=[i for i,x in enumerate(filename_list) if "IPop" in x][0]
filename_list=filename_list[init:]+filename_list[:init]
data=dict()
for i,f in enumerate(filename_list):
if i==0:
wp = pd.read_pickle(f)
for item in list(wp.items):
data[item]=wp[item]
else:
df_ = pd.read_pickle(f)
#df=df.join(df_, how="outer", on=df_.index.name)
data_label=f.split("size_")[1].split(".pkl")[0]
data[data_label]=df_
## Reconstructing panel
wp=pd.Panel(data)
##>>> wp.items
##Index([u'IH', u'IPop', u'IPv4', u'LP', u'PPPGDP'], dtype='object')
dir_db = Config.get("Directory",'database')
fn_db = Config.get("output",'filename')
wp.to_pickle(os.path.join(dir_db, fn_db))
| gpl-3.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/Sphinx-1.5.1-py3.5.egg/sphinx/ext/inheritance_diagram.py | 4 | 14751 | # -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import inspect
try:
from hashlib import md5
except ImportError:
from md5 import md5
from six import text_type
from six.moves import builtins
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
render_dot_texinfo, figure_wrapper
from sphinx.pycode import ModuleAnalyzer
from sphinx.util import force_decode
from sphinx.util.compat import Directive
module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
def try_import(objname):
"""Import a object or module using *name* and *currentmodule*.
*name* should be a relative name from *currentmodule* or
a fully-qualified name.
Returns imported object or module. If failed, returns None value.
"""
try:
__import__(objname)
return sys.modules.get(objname)
except ImportError:
modname, attrname = module_sig_re.match(objname).groups()
if modname is None:
return None
try:
__import__(modname)
return getattr(sys.modules.get(modname), attrname, None)
except ImportError:
return None
def import_classes(name, currmodule):
"""Import a class using its fully-qualified *name*."""
target = None
# import class or module using currmodule
if currmodule:
target = try_import(currmodule + '.' + name)
# import class or module without currmodule
if target is None:
target = try_import(name)
if target is None:
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % name)
if inspect.isclass(target):
# If imported object is a class, just return it
return [target]
elif inspect.ismodule(target):
# If imported object is a module, return classes defined on it
classes = []
for cls in target.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == target.__name__:
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name)
class InheritanceException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0):
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
classes = self._import_classes(class_names, currmodule)
self.class_info = self._class_info(classes, show_builtins,
private_bases, parts)
if not self.class_info:
raise InheritanceException('No classes found for '
'inheritance diagram')
def _import_classes(self, class_names, currmodule):
"""Import a list of classes."""
classes = []
for name in class_names:
classes.extend(import_classes(name, currmodule))
return classes
def _class_info(self, classes, show_builtins, private_bases, parts):
"""Return name and bases for all classes that are ancestors of
*classes*.
*parts* gives the number of dotted name parts that is removed from the
displayed node names.
"""
all_classes = {}
py_builtins = vars(builtins).values()
def recurse(cls):
if not show_builtins and cls in py_builtins:
return
if not private_bases and cls.__name__.startswith('_'):
return
nodename = self.class_name(cls, parts)
fullname = self.class_name(cls, 0)
# Use first line of docstring as tooltip, if available
tooltip = None
try:
if cls.__doc__:
enc = ModuleAnalyzer.for_module(cls.__module__).encoding
doc = cls.__doc__.strip().split("\n")[0]
if not isinstance(doc, text_type):
doc = force_decode(doc, enc)
if doc:
tooltip = '"%s"' % doc.replace('"', '\\"')
except Exception: # might raise AttributeError for strange classes
pass
baselist = []
all_classes[cls] = (nodename, fullname, baselist, tooltip)
for base in cls.__bases__:
if not show_builtins and base in py_builtins:
continue
if not private_bases and base.__name__.startswith('_'):
continue
baselist.append(self.class_name(base, parts))
if base not in all_classes:
recurse(base)
for cls in classes:
recurse(cls)
return list(all_classes.values())
def class_name(self, cls, parts=0):
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module in ('__builtin__', 'builtins'):
fullname = cls.__name__
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info]
# These are the default attrs for graphviz
default_graph_attrs = {
'rankdir': 'LR',
'size': '"8.0, 12.0"',
}
default_node_attrs = {
'shape': 'box',
'fontsize': 10,
'height': 0.25,
'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
'Arial, Helvetica, sans"',
'style': '"setlinewidth(0.5)"',
}
default_edge_attrs = {
'arrowsize': 0.5,
'style': '"setlinewidth(0.5)"',
}
def _format_node_attrs(self, attrs):
return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
def _format_graph_attrs(self, attrs):
return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
g_attrs.update(graph_attrs)
n_attrs.update(node_attrs)
e_attrs.update(edge_attrs)
if env:
g_attrs.update(env.config.inheritance_graph_attrs)
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
res = []
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
for name, fullname, bases, tooltip in sorted(self.class_info):
# Write the node
this_node_attrs = n_attrs.copy()
if fullname in urls:
this_node_attrs['URL'] = '"%s"' % urls[fullname]
this_node_attrs['target'] = '"_top"'
if tooltip:
this_node_attrs['tooltip'] = tooltip
res.append(' "%s" [%s];\n' %
(name, self._format_node_attrs(this_node_attrs)))
# Write the edges
for base_name in bases:
res.append(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_attrs(e_attrs)))
res.append('}\n')
return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
pass
class InheritanceDiagram(Directive):
"""
Run when the inheritance_diagram directive is first encountered.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'parts': directives.nonnegative_int,
'private-bases': directives.flag,
'caption': directives.unchanged,
}
def run(self):
node = inheritance_diagram()
node.document = self.state.document
env = self.state.document.settings.env
class_names = self.arguments[0].split()
class_role = env.get_domain('py').role('class')
# Store the original content for use as a hash
node['parts'] = self.options.get('parts', 0)
node['content'] = ', '.join(class_names)
# Create a graph starting with the list of classes
try:
graph = InheritanceGraph(
class_names, env.ref_context.get('py:module'),
parts=node['parts'],
private_bases='private-bases' in self.options)
except InheritanceException as err:
return [node.document.reporter.warning(err.args[0],
line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = class_role(
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# wrap the result in figure node
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
return [node]
def get_graph_hash(node):
encoded = (node['content'] + str(node['parts'])).encode('utf-8')
return md5(encoded).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
# Create a mapping from fully-qualified class names to URLs.
graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()
current_filename = self.builder.current_docname + self.builder.out_suffix
urls = {}
for child in node:
if child.get('refuri') is not None:
if graphviz_output_format == 'SVG':
urls[child['reftitle']] = "../" + child.get('refuri')
else:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
if graphviz_output_format == 'SVG':
urls[child['reftitle']] = '../' + current_filename + '#' + child.get('refid')
else:
urls[child['reftitle']] = '#' + child.get('refid')
dotcode = graph.generate_dot(name, urls, env=self.builder.env)
render_dot_html(self, node, dotcode, {}, 'inheritance', 'inheritance',
alt='Inheritance diagram of ' + node['content'])
raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_latex(self, node, dotcode, {}, 'inheritance')
raise nodes.SkipNode
def texinfo_visit_inheritance_diagram(self, node):
"""
Output the graph for Texinfo. This will insert a PNG.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_texinfo(self, node, dotcode, {}, 'inheritance')
raise nodes.SkipNode
def skip(self, node):
raise nodes.SkipNode
def setup(app):
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
latex=(latex_visit_inheritance_diagram, None),
html=(html_visit_inheritance_diagram, None),
text=(skip, None),
man=(skip, None),
texinfo=(texinfo_visit_inheritance_diagram, None))
app.add_directive('inheritance-diagram', InheritanceDiagram)
app.add_config_value('inheritance_graph_attrs', {}, False)
app.add_config_value('inheritance_node_attrs', {}, False)
app.add_config_value('inheritance_edge_attrs', {}, False)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| gpl-3.0 |
jakirkham/mpld3 | examples/mpld3_logo.py | 19 | 3751 | """
mpld3 Logo Idea
===============
This example shows how mpld3 can be used to generate relatively intricate
vector graphics in the browser. This is an adaptation of a logo proposal by
github user debjan, in turn based on both the matplotlib and D3js logos.
"""
# Author: Jake VanderPlas
import matplotlib.pyplot as plt
from matplotlib import image, patches, colors
from matplotlib.colors import colorConverter
import numpy as np
import mpld3
imsize = np.array([319, 217])
center = [108.5, 108.5]
max_radius = 108.5
radii = np.linspace(16, max_radius, 5)
angles = np.arange(0, 360, 45)
fig = plt.figure(figsize=imsize / 50.)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, xticks=[], yticks=[])
# Create a clip path for the elements
clip_path = patches.Rectangle((0, 0), imsize[0], imsize[1],
transform=ax.transData)
# Create the background gradient
x = np.array([0, 104, 196, 300])
y = np.linspace(150, 450, 86)[:, None]
c = np.cos(-np.pi / 4)
s = np.sin(-np.pi / 4)
X, Y = (c * x - s * y) - 116, (s * x + c * y)
C = np.arange(255).reshape((3, 85)).T
C = C[::-1, :]
cmap = colors.LinearSegmentedColormap.from_list("mpld3",
[[0.97, 0.6, 0.29],
[0.97, 0.59, 0.27],
[0.97, 0.58, 0.25],
[0.95, 0.44, 0.34],
[0.92, 0.51, 0.29],
[0.68, 0.21, 0.20]])
mesh = ax.pcolormesh(X, Y, C, cmap=cmap, shading='gouraud', zorder=0)
mesh.set_clip_path(clip_path)
# cut-off the background to form the "D" and "3" using white patches
# (this could also be done with a clip path)
kwargs = dict(fc='white', ec='none', zorder=1)
ax.add_patch(patches.Rectangle([0, 0], center[0], imsize[1], **kwargs))
ax.add_patch(patches.Circle(center, radii[2], **kwargs))
ax.add_patch(patches.Wedge(center, 127, -90, 90, width=18.5, **kwargs))
ax.add_patch(patches.Circle((252, 66), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 48], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 66), 101, -90, 40.1, width=35, **kwargs))
ax.add_patch(patches.Circle((252, 151), 18, **kwargs))
ax.add_patch(patches.Rectangle([216, 133], 36, 36, **kwargs))
ax.add_patch(patches.Wedge((252, 151), 101, -40.1, 90, width=35, **kwargs))
ax.add_patch(patches.Rectangle([-200, -200], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([-200, -200], 200, 617, **kwargs))
ax.add_patch(patches.Rectangle([-200, imsize[1]], 719, 200, **kwargs))
ax.add_patch(patches.Rectangle([imsize[0], -200], 200, 617, **kwargs))
# plot circles and lines
for radius in radii:
ax.add_patch(patches.Circle(center, radius, lw=0.5,
ec='gray', fc='none', zorder=2))
for angle in angles:
dx, dy = np.sin(np.radians(angle)), np.cos(np.radians(angle))
ax.plot([max_radius * (1 - dx), max_radius * (1 + dx)],
[max_radius * (1 - dy), max_radius * (1 + dy)],
'-', color='gray', lw=0.5, zorder=2)
# plot wedges within the graph
wedges = [(98, 231, 258, '#FF6600'),
(85, 170, 205, '#FFC500'),
(60, 80, 103, '#7DFF78'),
(96, 45, 58, '#FD7C1A'),
(73, 291, 308, '#CCFF28'),
(47, 146, 155, '#28FFCC'),
(25, 340, 360, '#004AFF')]
for (radius, theta1, theta2, color) in wedges:
ax.add_patch(patches.Wedge(center, radius, theta1, theta2,
fc=color, ec='black', alpha=0.6, zorder=3))
for patch in ax.patches:
patch.set_clip_path(clip_path)
ax.set_xlim(0, imsize[0])
ax.set_ylim(imsize[1], 0)
#plt.savefig('mpld3.png')
mpld3.show()
| bsd-3-clause |
endrebak/epic | tests/merge/test_add_regions.py | 1 | 23480 | import pytest
from collections import OrderedDict
import pandas as pd
from io import StringIO
from epic.merge.merge import merge_matrixes
from epic.merge.merge_helpers import add_new_enriched_bins_matrixes
@pytest.fixture
def regions(tmpdir):
fs = []
for i, c in enumerate(["""chr1 10000 10599 2.2761062711783457e-05 67.49046260339546 .
chr1 72000 91599 2.4770408905838545e-226 235.4664881299362 .""",
"""chr1 9800 15199 0.0048446172754557214 33.652547110032025 .
chr1 40000 41199 2.187570707966001e-08 1000.0 .""",
"""chr1 9800 10599 3.239383152206723e-79 204.30687218918862 .
chr1 38800 40799 2.4798100382025985e-11 1000.0 ."""]):
name = str(i)
f = tmpdir.mkdir(name).join(name)
f.write(c)
fs.append(str(f))
return fs
@pytest.fixture
def dfs():
od = OrderedDict()
for n, c in [("fibroblast.matrix.gz",u"""Chromosome Bin Enriched_fibroblast chrX/ChIP_1_fibroblast.bed.gz chrX/ChIP_2_fibroblast.bed.gz chrX/ChIP_3_fibroblast.bed.gz chrX/Input_1_fibroblast.bed.gz chrX/Input_2_fibroblast.bed.gz chrX/Input_3_fibroblast.bed.gz
chr1 10000 1 4 14 13 2 4 14
chr1 10200 1 17 24 14 9 9 16
chr1 10400 1 3 1 1 1 0 2
chr1 11400 1 0 0 1 0 0 0
chr1 11600 1 0 0 0 0 0 0
chr1 11800 1 1 0 1 0 0 0
chr1 12000 1 0 0 0 0 0 0
chr1 12200 1 0 0 1 0 0 0
chr1 12400 1 0 0 0 0 0 0
chr1 12600 1 0 0 0 1 0 0
chr1 12800 1 0 0 0 0 0 0
chr1 13000 1 3 6 4 1 3 2
chr1 13200 1 0 1 0 1 1 0
chr1 13400 1 7 4 5 1 1 3
chr1 13600 1 0 0 0 0 0 0
chr1 13800 1 0 0 1 0 0 0
chr1 14000 1 0 0 0 0 0 0
chr1 14200 1 0 0 0 0 0 0
chr1 14400 1 0 0 0 0 0 0"""),
("keratinocyte.matrix.gz", u"""Chromosome Bin Enriched_keratinocyte chrX/ChIP_1_keratinocyte.bed.gz chrX/ChIP_2_keratinocyte.bed.gz chrX/ChIP_3_keratinocyte.bed.gz chrX/Input_1_keratinocyte.bed.gz chrX/Input_2_keratinocyte.bed.gz chrX/Input_3_keratinocyte.bed.gz
chr1 9800 1 1 0 0 2 0 0
chr1 10000 1 13 15 17 11 2 17
chr1 10200 1 13 25 23 16 2 24
chr1 10400 1 2 0 2 10 0 3
chr1 10600 1 0 0 0 0 0 0
chr1 10800 1 0 0 1 0 0 0
chr1 11000 1 0 0 0 0 0 0
chr1 11200 1 0 0 0 0 0 0
chr1 11400 1 0 0 0 0 0 0
chr1 11600 1 0 0 1 0 0 0
chr1 11800 1 0 0 0 0 0 0
chr1 12000 1 0 0 2 0 0 0
chr1 12200 1 0 0 0 0 0 0
chr1 12400 1 0 0 0 0 0 0
chr1 12600 1 0 0 0 0 0 1
chr1 12800 1 0 1 1 0 0 0
chr1 13000 1 0 0 6 7 1 3
chr1 13200 1 0 2 0 2 1 1
chr1 13400 1 1 1 6 4 0 2"""),
("melanocyte.matrix.gz", u"""Chromosome Bin Enriched_melanocyte chrX/ChIP_1_melanocyte.bed.gz chrX/ChIP_2_melanocyte.bed.gz chrX/ChIP_3_melanocyte.bed.gz chrX/Input_1_melanocyte.bed.gz chrX/Input_2_melanocyte.bed.gz chrX/Input_3_melanocyte.bed.gz
chr1 9800 1 0 0 2 0 0 0
chr1 10000 1 13 3 128 2 2 21
chr1 10200 1 15 8 96 5 3 23
chr1 10400 1 3 0 4 3 0 7
chr1 11800 0 1 0 0 0 0 0
chr1 12000 0 0 0 0 0 0 4
chr1 12200 0 0 0 0 0 0 1
chr1 12400 0 0 1 0 0 0 1
chr1 12600 0 1 0 0 0 0 0
chr1 13000 0 5 3 1 0 0 2
chr1 13200 0 1 3 0 0 0 1
chr1 13400 0 3 0 3 1 0 3
chr1 13800 0 1 0 0 0 0 0
chr1 14600 0 1 0 1 0 0 0
chr1 14800 0 1 0 4 1 0 4
chr1 15000 0 1 0 1 1 0 2
chr1 15600 0 0 1 0 0 0 1
chr1 15800 0 1 0 0 0 0 0
chr1 16000 0 1 1 2 1 2 2""")]:
df = pd.read_table(StringIO(c), sep="\s+", header=0, index_col=[0, 1])
od[n] = df
return od
def test_add_regions(regions, dfs, expected_result):
result = add_new_enriched_bins_matrixes(regions, dfs, 200)
for (kr, vr), (kx, vx) in zip(result.items(), expected_result.items()):
print(kr, kx)
print(vr.to_csv(sep=" "))
print(vx.to_csv(sep=" "))
assert vr.equals(vx)
@pytest.fixture
def simple_dfs():
od = OrderedDict()
for n, c in [("melanocyte.matrix", u"""Chromosome Bin Enriched_melano chrX/ChIP_1_melanocyte.bed.gz chrX/ChIP_2_melanocyte.bed.gz chrX/Input_1_melanocyte.bed.gz chrX/Input_2_melanocyte.bed.gz
chr1 800 1 0 2 0 0
chr1 1200 1 13 128 2 2"""),
("fibroblast.matrix", u"""Chromosome Bin Enriched_fibro chrX/ChIP_1_fibroblast.bed.gz chrX/ChIP_2_fibroblast.bed.gz chrX/Input_1_fibroblast.bed.gz chrX/Input_2_fibroblast.bed.gz
chr1 800 1 0 2 0 0
chr1 1200 1 13 128 2 2""")]:
df = pd.read_table(StringIO(c), sep="\s+", header=0, index_col=[0, 1])
od[n] = df
return od
@pytest.fixture
def simple_regions(tmpdir):
fs = []
for n, c in zip(["melanocyte", "fibroblast"],
[u"""chr1 600 1200 2.2761062711783457e-05 67.49046260339546 .""",
u"""chr1 200 1600 0.0048446172754557214 33.652547110032025 ."""]):
name = n
f = tmpdir.mkdir(name).join(name)
f.write(c)
fs.append(str(f))
return fs
@pytest.fixture
def simple_expected_result():
melano = u"""Chromosome Bin chrX/ChIP_1_melanocyte.bed.gz chrX/ChIP_2_melanocyte.bed.gz chrX/Input_1_melanocyte.bed.gz chrX/Input_2_melanocyte.bed.gz Enriched_melanocyte
chr1 200 0.0 0.0 0.0 0.0 0.0
chr1 400 0.0 0.0 0.0 0.0 0.0
chr1 600 0.0 0.0 0.0 0.0 1.0
chr1 800 0.0 2.0 0.0 0.0 1.0
chr1 1000 0.0 0.0 0.0 0.0 1.0
chr1 1200 13.0 128.0 2.0 2.0 1.0
chr1 1400 0.0 0.0 0.0 0.0 0.0
chr1 1600 0.0 0.0 0.0 0.0 0.0"""
fibro = u"""Chromosome Bin chrX/ChIP_1_fibroblast.bed.gz chrX/ChIP_2_fibroblast.bed.gz chrX/Input_1_fibroblast.bed.gz chrX/Input_2_fibroblast.bed.gz Enriched_fibroblast
chr1 200 0.0 0.0 0.0 0.0 1
chr1 400 0.0 0.0 0.0 0.0 1
chr1 600 0.0 0.0 0.0 0.0 1
chr1 800 0.0 2.0 0.0 0.0 1
chr1 1000 0.0 0.0 0.0 0.0 1
chr1 1200 13.0 128.0 2.0 2.0 1
chr1 1400 0.0 0.0 0.0 0.0 1
chr1 1600 0.0 0.0 0.0 0.0 1"""
od = OrderedDict()
od["melano"] = pd.read_table(StringIO(melano), sep="\s+", index_col=[0, 1])
od["fibro"] = pd.read_table(StringIO(fibro), sep="\s+", index_col=[0, 1])
return od
def test_simple_add_regions(simple_dfs, simple_regions, simple_expected_result):
result = add_new_enriched_bins_matrixes(simple_regions, simple_dfs, 200)
for (k, v), (k2, x) in zip(result.items(), simple_expected_result.items()):
print(k, k2)
print(x.to_csv(sep=" "))
print(v.to_csv(sep=" "))
assert x.equals(v)
@pytest.fixture
def expected_melano():
c = u"""Chromosome Bin chrX/ChIP_1_melanocyte.bed.gz chrX/ChIP_2_melanocyte.bed.gz chrX/ChIP_3_melanocyte.bed.gz chrX/Input_1_melanocyte.bed.gz chrX/Input_2_melanocyte.bed.gz chrX/Input_3_melanocyte.bed.gz Enriched_2
chr1 9800 0.0 0.0 2.0 0.0 0.0 0.0 1.0
chr1 10000 13.0 3.0 128.0 2.0 2.0 21.0 1.0
chr1 10200 15.0 8.0 96.0 5.0 3.0 23.0 1.0
chr1 10400 3.0 0.0 4.0 3.0 0.0 7.0 1.0
chr1 10600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 10800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11800 1.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 12000 0.0 0.0 0.0 0.0 0.0 4.0 0.0
chr1 12200 0.0 0.0 0.0 0.0 0.0 1.0 0.0
chr1 12400 0.0 1.0 0.0 0.0 0.0 1.0 0.0
chr1 12600 1.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 12800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 13000 5.0 3.0 1.0 0.0 0.0 2.0 0.0
chr1 13200 1.0 3.0 0.0 0.0 0.0 1.0 0.0
chr1 13400 3.0 0.0 3.0 1.0 0.0 3.0 0.0
chr1 13600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 13800 1.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14600 1.0 0.0 1.0 0.0 0.0 0.0 0.0
chr1 14800 1.0 0.0 4.0 1.0 0.0 4.0 0.0
chr1 15000 1.0 0.0 1.0 1.0 0.0 2.0 0.0
chr1 15600 0.0 1.0 0.0 0.0 0.0 1.0 0.0
chr1 15800 1.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 16000 1.0 1.0 2.0 1.0 2.0 2.0 0.0
chr1 38800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 39000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 39200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 39400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 39600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 39800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 41000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 91000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 91200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 91400 0.0 0.0 0.0 0.0 0.0 0.0 0.0"""
return pd.read_table(StringIO(c), sep=" ", index_col=[0, 1])
@pytest.fixture
def expected_keratino():
c = u"""Chromosome Bin chrX/ChIP_1_keratinocyte.bed.gz chrX/ChIP_2_keratinocyte.bed.gz chrX/ChIP_3_keratinocyte.bed.gz chrX/Input_1_keratinocyte.bed.gz chrX/Input_2_keratinocyte.bed.gz chrX/Input_3_keratinocyte.bed.gz Enriched_1
chr1 9800 1.0 0.0 0.0 2.0 0.0 0.0 1.0
chr1 10000 13.0 15.0 17.0 11.0 2.0 17.0 1.0
chr1 10200 13.0 25.0 23.0 16.0 2.0 24.0 1.0
chr1 10400 2.0 0.0 2.0 10.0 0.0 3.0 1.0
chr1 10600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 10800 0.0 0.0 1.0 0.0 0.0 0.0 1.0
chr1 11000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 11200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 11400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 11600 0.0 0.0 1.0 0.0 0.0 0.0 1.0
chr1 11800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 12000 0.0 0.0 2.0 0.0 0.0 0.0 1.0
chr1 12200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 12400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 12600 0.0 0.0 0.0 0.0 0.0 1.0 1.0
chr1 12800 0.0 1.0 1.0 0.0 0.0 0.0 1.0
chr1 13000 0.0 0.0 6.0 7.0 1.0 3.0 1.0
chr1 13200 0.0 2.0 0.0 2.0 1.0 1.0 1.0
chr1 13400 1.0 1.0 6.0 4.0 0.0 2.0 1.0
chr1 13600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 13800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 14000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 14200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 14400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 14600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 14800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 15000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 38800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 40000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 40800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 41000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 72000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 73800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 74800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 75800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 76800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 77800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 78800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 79800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 80800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 81800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 82800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 83800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 84800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 85800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 86800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 87800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 88800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 89800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 90800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 91000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 91200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 91400 0.0 0.0 0.0 0.0 0.0 0.0 0.0"""
return pd.read_table(StringIO(c), sep=" ", index_col=[0, 1])
@pytest.fixture
def expected_fibro():
c = u"""Chromosome Bin chrX/ChIP_1_fibroblast.bed.gz chrX/ChIP_2_fibroblast.bed.gz chrX/ChIP_3_fibroblast.bed.gz chrX/Input_1_fibroblast.bed.gz chrX/Input_2_fibroblast.bed.gz chrX/Input_3_fibroblast.bed.gz Enriched_0
chr1 9800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 10000 4.0 14.0 13.0 2.0 4.0 14.0 1.0
chr1 10200 17.0 24.0 14.0 9.0 9.0 16.0 1.0
chr1 10400 3.0 1.0 1.0 1.0 0.0 2.0 1.0
chr1 10600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 10800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11400 0.0 0.0 1.0 0.0 0.0 0.0 0.0
chr1 11600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 11800 1.0 0.0 1.0 0.0 0.0 0.0 0.0
chr1 12000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 12200 0.0 0.0 1.0 0.0 0.0 0.0 0.0
chr1 12400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 12600 0.0 0.0 0.0 1.0 0.0 0.0 0.0
chr1 12800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 13000 3.0 6.0 4.0 1.0 3.0 2.0 0.0
chr1 13200 0.0 1.0 0.0 1.0 1.0 0.0 0.0
chr1 13400 7.0 4.0 5.0 1.0 1.0 3.0 0.0
chr1 13600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 13800 0.0 0.0 1.0 0.0 0.0 0.0 0.0
chr1 14000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 14800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 15000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 38800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 39800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 40000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 40200 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 40400 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 40600 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 40800 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 41000 0.0 0.0 0.0 0.0 0.0 0.0 0.0
chr1 72000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 72200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 72400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 72600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 72800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 73000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 73200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 73400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 73600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 73800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 74000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 74200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 74400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 74600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 74800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 75000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 75200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 75400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 75600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 75800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 76000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 76200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 76400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 76600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 76800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 77000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 77200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 77400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 77600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 77800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 78000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 78200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 78400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 78600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 78800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 79000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 79200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 79400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 79600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 79800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 80000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 80200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 80400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 80600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 80800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 81000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 81200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 81400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 81600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 81800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 82000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 82200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 82400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 82600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 82800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 83000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 83200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 83400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 83600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 83800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 84000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 84200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 84400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 84600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 84800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 85000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 85200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 85400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 85600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 85800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 86000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 86200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 86400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 86600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 86800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 87000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 87200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 87400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 87600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 87800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 88000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 88200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 88400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 88600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 88800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 89000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 89200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 89400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 89600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 89800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 90000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 90200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 90400 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 90600 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 90800 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 91000 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 91200 0.0 0.0 0.0 0.0 0.0 0.0 1.0
chr1 91400 0.0 0.0 0.0 0.0 0.0 0.0 1.0"""
return pd.read_table(StringIO(c), sep=" ", index_col=[0, 1])
@pytest.fixture
def expected_result(expected_fibro, expected_keratino, expected_melano):
od = OrderedDict()
od["fibro"] = expected_fibro
od["keratino"] = expected_keratino
od["melano"] = expected_melano
return od
| mit |
beiko-lab/gengis | bin/Lib/site-packages/matplotlib/backends/backend_gtkagg.py | 6 | 4346 | """
Render to gtk from agg
"""
from __future__ import division, print_function
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print('backend_gtkagg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTKAgg(figure)
if DEBUG: print('backend_gtkagg.new_figure_manager done')
return FigureManagerGTKAgg(canvas, num)
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print('FigureCanvasGTKAgg.configure_event')
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._need_redraw = True
self.resize_event()
if DEBUG: print('FigureCanvasGTKAgg.configure_event end')
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print('FigureCanvasGTKAgg.render_figure')
FigureCanvasAgg.draw(self)
if DEBUG: print('FigureCanvasGTKAgg.render_figure pixmap', pixmap)
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba()
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print('FigureCanvasGTKAgg.render_figure done')
def blit(self, bbox=None):
if DEBUG: print('FigureCanvasGTKAgg.blit', self._pixmap)
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print('FigureCanvasGTKAgg.done')
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
| gpl-3.0 |
fxsjy/pybrain | examples/supervised/evolino/superimposed_sine.py | 4 | 3443 | #!/usr/bin/env python
__author__ = 'Michael Isik'
from pylab import plot, show, ion, cla, subplot, title, figlegend, draw
import numpy
from pybrain.structure.modules.evolinonetwork import EvolinoNetwork
from pybrain.supervised.trainers.evolino import EvolinoTrainer
from lib.data_generator import generateSuperimposedSineData
print
print "=== Learning to extrapolate 5 superimposed sine waves ==="
print
sinefreqs = ( 0.2, 0.311, 0.42, 0.51, 0.74 )
# sinefreqs = ( 0.2, 0.311, 0.42, 0.51, 0.74, 0.81 )
metascale = 8.
scale = 0.5 * metascale
stepsize = 0.1 * metascale
# === create training dataset
# the sequences must be stored in the target field
# the input field will be ignored
print "creating training data"
trnInputSpace = numpy.arange( 0*scale , 190*scale , stepsize )
trnData = generateSuperimposedSineData(sinefreqs, trnInputSpace)
# === create testing dataset
print "creating test data"
tstInputSpace = numpy.arange( 400*scale , 540*scale , stepsize)
tstData = generateSuperimposedSineData(sinefreqs, tstInputSpace)
# === create the evolino-network
print "creating EvolinoNetwork"
net = EvolinoNetwork( trnData.outdim, 40 )
wtRatio = 1./3.
# === instantiate an evolino trainer
# it will train our network through evolutionary algorithms
print "creating EvolinoTrainer"
trainer = EvolinoTrainer(
net,
dataset=trnData,
subPopulationSize = 20,
nParents = 8,
nCombinations = 1,
initialWeightRange = ( -0.01 , 0.01 ),
# initialWeightRange = ( -0.1 , 0.1 ),
# initialWeightRange = ( -0.5 , -0.2 ),
backprojectionFactor = 0.001,
mutationAlpha = 0.001,
# mutationAlpha = 0.0000001,
nBurstMutationEpochs = numpy.Infinity,
wtRatio = wtRatio,
verbosity = 2)
# === prepare sequences for extrapolation and plotting
trnSequence = trnData.getField('target')
separatorIdx = int(len(trnSequence)*wtRatio)
trnSequenceWashout = trnSequence[0:separatorIdx]
trnSequenceTarget = trnSequence[separatorIdx:]
tstSequence = tstData.getField('target')
separatorIdx = int(len(tstSequence)*wtRatio)
tstSequenceWashout = tstSequence[0:separatorIdx]
tstSequenceTarget = tstSequence[separatorIdx:]
ion() # switch matplotlib to interactive mode
for i in range(3000):
print "======================"
print "====== NEXT RUN ======"
print "======================"
print "=== TRAINING"
# train the network for 1 epoch
trainer.trainEpochs( 1 )
print "=== PLOTTING\n"
# calculate the nets output for train and the test data
trnSequenceOutput = net.extrapolate(trnSequenceWashout, len(trnSequenceTarget))
tstSequenceOutput = net.extrapolate(tstSequenceWashout, len(tstSequenceTarget))
# plot training data
sp = subplot(211) # switch to the first subplot
cla() # clear the subplot
title("Training Set") # set the subplot's title
sp.set_autoscale_on( True ) # enable autoscaling
targetline = plot(trnSequenceTarget,"r-") # plot the targets
sp.set_autoscale_on( False ) # disable autoscaling
outputline = plot(trnSequenceOutput,"b-") # plot the actual output
# plot test data
sp = subplot(212)
cla()
title("Test Set")
sp.set_autoscale_on( True )
plot(tstSequenceTarget,"r-")
sp.set_autoscale_on( False )
plot(tstSequenceOutput,"b-")
# create a legend
figlegend((targetline, outputline),('target','output'),('upper right'))
# draw everything
draw()
show()
| bsd-3-clause |
ueshin/apache-spark | python/pyspark/testing/sqlutils.py | 23 | 7740 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
pandas_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = str(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = str(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = str(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
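# These flags are typically consumed as skip conditions in the test modules,
# e.g. (the test class name here is hypothetical):
#
#   @unittest.skipIf(not have_pandas, pandas_requirement_message)
#   class PandasDependentTests(ReusedSQLTestCase):
#       ...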
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies a timezone as a fixed offset from UTC, given in hours
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
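# Usage sketch (illustrative only): build a timezone-aware datetime at UTC+8.
#
#   ts = datetime.datetime(2020, 1, 1, 12, tzinfo=UTCOffsetTimezone(8))
#   ts.utcoffset() # -> datetime.timedelta(hours=8)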
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate a UDT implemented in Python only
"""
__UDT__ = PythonOnlyUDT() # type: ignore
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance of this to have 'spark' attribute, having a spark session.
It is usually used with 'ReusedSQLTestCase' class but can be used if you feel sure the
implementation of this class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager for testing configuration-specific logic. It sets each
configuration `key` to the given `value` and restores the original values when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
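# Usage sketch from a test method of a class mixing in SQLTestUtils (the
# config key and value are only an example):
#
#   with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
#       ... # code under test sees the temporary setting
#   # on exit the previous value is restored, or the key is unset again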
@contextmanager
def database(self, *databases):
"""
A convenient context manager to test with some specific databases. This drops the given
databases if they exist and sets the current database back to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
A convenient context manager to test with some specific tables. This drops the given tables
if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
A convenient context manager to test with some specific views. This drops the given views
if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
A convenient context manager to test with some specific functions. This drops the given
functions if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
| apache-2.0 |
davidastephens/zipline | setup.py | 2 | 2481 | #!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from setuptools import setup, find_packages
LONG_DESCRIPTION = None
README_MARKDOWN = None
with open('README.md') as markdown_source:
README_MARKDOWN = markdown_source.read()
if 'upload' in sys.argv:
# Convert the README.md file to ReST, since PyPI uses ReST for formatting.
# This lets the project keep a single canonical README file, README.md.
# The conversion only needs to be done on upload.
# Otherwise, the pandoc import and the errors thrown when pandoc is missing
# would be both overhead and a source of confusion for general
# usage/installation.
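# For example, "python setup.py sdist upload" takes this branch because
# 'upload' appears in sys.argv, while a plain "pip install ." never does.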
import pandoc
pandoc.core.PANDOC_PATH = 'pandoc'
doc = pandoc.Document()
doc.markdown = README_MARKDOWN
LONG_DESCRIPTION = doc.rst
else:
# If pandoc isn't installed, e.g. when downloading from pip,
# just use the regular README.
LONG_DESCRIPTION = README_MARKDOWN
setup(
name='zipline',
version='0.5.11.dev',
description='A backtester for financial algorithms.',
author='Quantopian Inc.',
author_email='opensource@quantopian.com',
packages=find_packages(),
long_description=LONG_DESCRIPTION,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=[
'iso8601',
'Logbook',
'pytz',
'requests',
'numpy',
'pandas'
],
url="https://github.com/quantopian/zipline"
)
| apache-2.0 |
foolcage/fooltrader | fooltrader/botsamples/eos_statistic_bot.py | 1 | 9413 | # -*- coding: utf-8 -*-
from datetime import timedelta, datetime
import pandas as pd
from fooltrader.bot.bot import NotifyEventBot
from fooltrader.contract.es_contract import get_es_kdata_index, get_es_statistic_index
from fooltrader.domain.data.es_quote import CommonKData, CommonStatistic
from fooltrader.settings import TIME_FORMAT_MICRO
from fooltrader.utils.es_utils import es_get_latest_timestamp, es_get_latest_record, es_index_mapping
from fooltrader.utils.utils import to_timestamp, to_time_str, fill_doc_type, is_same_date, is_same_time
statistic_index_name = get_es_statistic_index(security_type='cryptocurrency', exchange='contract')
kdata_index_name = get_es_kdata_index(security_type='cryptocurrency', exchange='contract', level='1min')
es_index_mapping(statistic_index_name, CommonStatistic)
es_index_mapping(kdata_index_name, CommonKData)
class EosStatisticBot(NotifyEventBot):
BIG_ORDER = 2000 * 10000
MIDDLE_ORDER = 500 * 10000
def on_init(self):
super().on_init()
self.security_id = 'cryptocurrency_contract_RAM-EOS'
query = {
"term": {"securityId": ""}
}
query["term"]["securityId"] = self.security_id
# get latest kdata timestamp
latest_kdata_timestamp = es_get_latest_timestamp(index=kdata_index_name, query=query)
# get latest eos statistic timestamp
latest_statistic_record = es_get_latest_record(index=statistic_index_name,
query=query, time_field='updateTimestamp')
if latest_statistic_record:
self.latest_statistic_record = CommonStatistic(
meta={'id': latest_statistic_record['id'], 'index': statistic_index_name},
**latest_statistic_record)
if not is_same_time(latest_kdata_timestamp, self.latest_statistic_record['updateTimestamp']):
self.logger.warning(
"latest_kdata_timestamp:{},latest_statistic_timestamp:{}".format(latest_kdata_timestamp,
self.latest_statistic_record[
'updateTimestamp']))
else:
self.latest_statistic_record = None
if latest_kdata_timestamp and self.latest_statistic_record:
self.start_timestamp = min(latest_kdata_timestamp,
to_timestamp(self.latest_statistic_record['updateTimestamp']))
def after_init(self):
super().after_init()
if not self.start_timestamp:
self.start_timestamp = to_timestamp(self.security_item['listDate'])
# the last timestamp for the computing interval
self.last_timestamp = None
self.last_day_time_str = None
self.last_mirco_time_str = None
self.df = pd.DataFrame()
self.item_list = []
self.computing_start = None
def init_new_computing_interval(self, event_timestamp):
self.last_timestamp = to_timestamp(event_timestamp)
self.kdata_timestamp = self.last_timestamp + timedelta(seconds=-self.last_timestamp.second,
microseconds=-self.last_timestamp.microsecond)
self.last_day_time_str = to_time_str(self.kdata_timestamp)
self.last_mirco_time_str = to_time_str(self.kdata_timestamp, time_fmt=TIME_FORMAT_MICRO)
def on_event(self, event_item):
if not self.computing_start:
self.computing_start = datetime.now()
if not self.last_timestamp:
self.init_new_computing_interval(event_item['timestamp'])
current_timestamp = to_timestamp(event_item['timestamp'])
# calculating last minute
if current_timestamp.minute != self.last_timestamp.minute:
self.df = pd.DataFrame(self.item_list)
self.generate_1min_kdata()
self.generate_eos_daily_statistic()
self.init_new_computing_interval(event_item['timestamp'])
self.item_list = []
self.logger.info("using computing time:{}".format(datetime.now() - self.computing_start))
self.computing_start = datetime.now()
self.item_list.append(event_item)
def update_statistic_doc(self, statistic_doc, append_record, updateTimestamp):
for key in append_record.keys():
if pd.isna(append_record[key]):
the_value = 0
else:
the_value = append_record[key]
if key in statistic_doc:
statistic_doc[key] += float(the_value)
else:
statistic_doc[key] = float(the_value)
statistic_doc['updateTimestamp'] = updateTimestamp
statistic_doc.save(force=True)
def generate_eos_daily_statistic(self):
        # ignore statistics that have already been computed
if self.latest_statistic_record and self.kdata_timestamp <= to_timestamp(
self.latest_statistic_record['updateTimestamp']):
return
# update the statistic
if (not self.latest_statistic_record) or (not is_same_date(self.latest_statistic_record['timestamp'],
self.df['timestamp'][0])):
doc_id = "{}_{}".format(self.security_id, self.last_day_time_str)
self.latest_statistic_record = CommonStatistic(meta={'id': doc_id, 'index': statistic_index_name},
id=doc_id,
timestamp=self.last_day_time_str,
securityId=self.security_id,
code=self.security_item['code'],
name=self.security_item['name'])
volume = self.df['volume'].sum()
turnover = self.df['turnover'].sum()
flow = (self.df['turnover'] * self.df['direction']).sum()
flowIn = self.df[self.df['direction'] == 1]['turnover'].sum()
flowOut = self.df[self.df['direction'] == -1]['turnover'].sum()
bigFlowIn = self.df[(self.df['direction'] == 1) & (self.df['turnover'] >= self.BIG_ORDER)]['turnover'].sum()
middleFlowIn = self.df[(self.df['direction'] == 1) & (self.df['turnover'] >= self.MIDDLE_ORDER) & (
self.df['turnover'] < self.BIG_ORDER)]['turnover'].sum()
smallFlowIn = self.df[(self.df['direction'] == 1) & (self.df['turnover'] < self.MIDDLE_ORDER)]['turnover'].sum()
bigFlowOut = self.df[(self.df['direction'] == -1) & (self.df['turnover'] >= self.BIG_ORDER)]['turnover'].sum()
middleFlowOut = self.df[(self.df['direction'] == -1) & (self.df['turnover'] >= self.MIDDLE_ORDER) & (
self.df['turnover'] < self.BIG_ORDER)]['turnover'].sum()
smallFlowOut = self.df[(self.df['direction'] == -1) & (self.df['turnover'] < self.MIDDLE_ORDER)][
'turnover'].sum()
self.update_statistic_doc(self.latest_statistic_record, {'volume': volume,
'turnover': turnover,
'flow': flow,
'flowIn': flowIn,
'flowOut': flowOut,
'bigFlowIn': bigFlowIn,
'middleFlowIn': middleFlowIn,
'smallFlowIn': smallFlowIn,
'bigFlowOut': bigFlowOut,
'middleFlowOut': middleFlowOut,
'smallFlowOut': smallFlowOut
}, updateTimestamp=self.last_mirco_time_str)
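    # --- Editor's note (added comment): generate_eos_daily_statistic accumulates,
    # per day, total volume/turnover plus money-flow buckets. A trade's turnover
    # is classed as big (>= BIG_ORDER), middle (>= MIDDLE_ORDER and < BIG_ORDER)
    # or small (< MIDDLE_ORDER), split by direction (1 = inflow, -1 = outflow).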
def generate_1min_kdata(self):
doc_id = "{}_{}".format(self.security_id, self.last_mirco_time_str)
kdata_doc = CommonKData(meta={'id': doc_id, 'index': kdata_index_name}, id=doc_id)
if kdata_doc.exist(index=kdata_index_name):
return
se_price = self.df['price']
high = se_price.max()
low = se_price.min()
open = se_price[0]
close = se_price[len(se_price) - 1]
volume = self.df['volume'].sum()
turnover = self.df['turnover'].sum()
kdata_json = {
'id': doc_id,
'timestamp': self.last_mirco_time_str,
'updateTimestamp': self.last_mirco_time_str,
'securityId': self.security_item['id'],
'code': self.security_item['code'],
'name': self.security_item['name'],
'open': float(open),
'high': float(high),
'low': float(low),
'close': float(close),
'volume': float(volume),
'turnover': float(turnover)
}
fill_doc_type(kdata_doc, kdata_json)
kdata_doc.save(force=True)
if __name__ == '__main__':
EosStatisticBot().run()
| mit |
architecture-building-systems/CityEnergyAnalyst | cea/optimization/distribution/network_optimization_features.py | 2 | 3192 | """
Network optimization
"""
import pandas as pd
__author__ = "Sreepathi Bhargava Krishna"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Sreepathi Bhargava Krishna", "Tim Vollrath", "Thuy-An Nguyen", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "thomas@arch.ethz.ch"
__status__ = "Production"
class NetworkOptimizationFeatures(object):
"""
    This class just sets up constants of the linear model of the distribution.
    These results are extracted from the work of Florian at the chair.
    Unfortunately his work only applied to this case study and could not be used elsewhere.
    See the paper of Fonseca et al. 2015 on the City Energy Analyst for more info on how that procedure used to work.
"""
def __init__(self, district_heating_network, district_cooling_network, locator):
self.network_names = ['']
for network_name in self.network_names:
if district_heating_network:
self.E_pump_DHN_W = pd.read_csv(locator.get_network_energy_pumping_requirements_file("DH", network_name))[
'pressure_loss_total_kW'].values * 1000
self.mass_flow_rate_DHN = self.mass_flow_rate_plant(locator, network_name, "DH")
self.thermallosses_DHN = pd.read_csv(locator.get_network_total_thermal_loss_file("DH", network_name))[
'thermal_loss_total_kW'].values
self.pipesCosts_DHN_USD = self.pipe_costs(locator, network_name, "DH")
if district_cooling_network:
self.E_pump_DCN_W = pd.read_csv(locator.get_network_energy_pumping_requirements_file("DC", network_name))[
'pressure_loss_total_kW'].values * 1000
self.mass_flow_rate_DCN = self.mass_flow_rate_plant(locator, network_name, "DC")
self.thermallosses_DCN = pd.read_csv(locator.get_network_total_thermal_loss_file("DC", network_name))[
'thermal_loss_total_kW'].values
self.pipesCosts_DCN_USD = self.pipe_costs(locator, network_name, "DC")
def mass_flow_rate_plant(self, locator, network_name, network_type):
mass_flow_df = pd.read_csv((locator.get_thermal_network_layout_massflow_nodes_file(network_type, network_name)))
mass_flow_nodes_df = pd.read_csv((locator.get_thermal_network_node_types_csv_file(network_type, network_name)))
# identify the node with the plant
node_id = mass_flow_nodes_df.loc[mass_flow_nodes_df['Type'] == "PLANT", 'Name'].item()
return mass_flow_df[node_id].values
def pipe_costs(self, locator, network_name, network_type):
edges_file = pd.read_csv(locator.get_thermal_network_edge_list_file(network_type, network_name))
piping_cost_data = pd.read_excel(locator.get_database_distribution_systems(), sheet_name="THERMAL_GRID")
merge_df = edges_file.merge(piping_cost_data, left_on='Pipe_DN', right_on='Pipe_DN')
merge_df['Inv_USD2015'] = merge_df['Inv_USD2015perm'] * merge_df['length_m']
pipe_costs = merge_df['Inv_USD2015'].sum()
return pipe_costs
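# --- Editor's note: hedged usage sketch (added; not part of the original file).
# Construction typically looks like the lines below; the InputLocator import
# path and the scenario path are assumptions made for illustration only.
#
# from cea.inputlocator import InputLocator
# locator = InputLocator(scenario=r'/path/to/scenario')
# features = NetworkOptimizationFeatures(district_heating_network=True,
#                                        district_cooling_network=False,
#                                        locator=locator)
# print(features.pipesCosts_DHN_USD)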
| mit |
gef756/statsmodels | statsmodels/datasets/star98/data.py | 3 | 3939 | """Star98 Educational Testing dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "Star98 Educational Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT  = """Math scores for 303 students with 10 explanatory factors"""
DESCRLONG = """
This data is on the California education policy and outcomes (STAR program
results for 1998).  The data measured standardized testing by the California
Department of Education that required evaluation of 2nd - 11th grade students
by the Stanford 9 test on a variety of subjects.  This dataset is at
the level of the unified school district and consists of 303 cases. The
binary response variable represents the number of 9th graders scoring
over the national median value on the mathematics exam.
The data used in this example is only a subset of the original source.
"""
NOTE = """::
Number of Observations - 303 (counties in California).
Number of Variables - 13 and 8 interaction terms.
Definition of variables names::
NABOVE - Total number of students above the national median for the
math section.
NBELOW - Total number of students below the national median for the
math section.
LOWINC - Percentage of low income students
PERASIAN - Percentage of Asian student
PERBLACK - Percentage of black students
PERHISP - Percentage of Hispanic students
PERMINTE - Percentage of minority teachers
AVYRSEXP - Sum of teachers' years in educational service divided by the
number of teachers.
AVSALK - Total salary budget including benefits divided by the number
of full-time teachers (in thousands)
PERSPENK - Per-pupil spending (in thousands)
PTRATIO - Pupil-teacher ratio.
PCTAF - Percentage of students taking UC/CSU prep courses
PCTCHRT - Percentage of charter schools
PCTYRRND - Percentage of year-round schools
The below variables are interaction terms of the variables defined
above.
PERMINTE_AVYRSEXP
        PERMINTE_AVSAL
AVYRSEXP_AVSAL
PERSPEN_PTRATIO
PERSPEN_PCTAF
PTRATIO_PCTAF
        PERMINTE_AVYRSEXP_AVSAL
PERSPEN_PTRATIO_PCTAF
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the star98 data and returns a Dataset class instance.
Returns
-------
    Load instance:
        a class of the data with array attributes 'endog' and 'exog'
"""
data = _get_data()
return du.process_recarray(data, endog_idx=[0, 1], dtype=float)
def load_pandas():
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=['NABOVE', 'NBELOW'],
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
names = ["NABOVE","NBELOW","LOWINC","PERASIAN","PERBLACK","PERHISP",
"PERMINTE","AVYRSEXP","AVSALK","PERSPENK","PTRATIO","PCTAF",
"PCTCHRT","PCTYRRND","PERMINTE_AVYRSEXP","PERMINTE_AVSAL",
"AVYRSEXP_AVSAL","PERSPEN_PTRATIO","PERSPEN_PCTAF","PTRATIO_PCTAF",
"PERMINTE_AVYRSEXP_AVSAL","PERSPEN_PTRATIO_PCTAF"]
with open(filepath + '/star98.csv',"rb") as f:
data = recfromtxt(f, delimiter=",",
names=names, skip_header=1, dtype=float)
# careful now
nabove = data['NABOVE'].copy()
nbelow = data['NBELOW'].copy()
data['NABOVE'] = nbelow # successes
data['NBELOW'] = nabove - nbelow # now failures
return data
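# --- Editor's note: hedged usage sketch (added; not part of the original
# statsmodels module). It assumes the standard Dataset attributes ('endog',
# 'exog') returned by load().
def _example_usage():
    """Load the Star98 data and report the shapes of endog and exog."""
    dataset = load()
    return dataset.endog.shape, dataset.exog.shape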
| bsd-3-clause |
plotly/python-api | packages/python/plotly/plotly/graph_objs/streamtube/_starts.py | 2 | 6484 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Starts(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "streamtube"
_path_str = "streamtube.starts"
_valid_props = {"x", "xsrc", "y", "ysrc", "z", "zsrc"}
# x
# -
@property
def x(self):
"""
Sets the x components of the starting position of the
streamtubes
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
Sets the y components of the starting position of the
streamtubes
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# z
# -
@property
def z(self):
"""
Sets the z components of the starting position of the
streamtubes
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for z .
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
Sets the x components of the starting position of the
streamtubes
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the y components of the starting position of the
streamtubes
ysrc
Sets the source reference on Chart Studio Cloud for y
.
z
Sets the z components of the starting position of the
streamtubes
zsrc
Sets the source reference on Chart Studio Cloud for z
.
"""
def __init__(
self,
arg=None,
x=None,
xsrc=None,
y=None,
ysrc=None,
z=None,
zsrc=None,
**kwargs
):
"""
Construct a new Starts object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.streamtube.Starts`
x
Sets the x components of the starting position of the
streamtubes
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the y components of the starting position of the
streamtubes
ysrc
Sets the source reference on Chart Studio Cloud for y
.
z
Sets the z components of the starting position of the
streamtubes
zsrc
Sets the source reference on Chart Studio Cloud for z
.
Returns
-------
Starts
"""
super(Starts, self).__init__("starts")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.streamtube.Starts
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.Starts`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
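# --- Editor's note: hedged usage sketch (added; not part of the generated file).
# The coordinate values below are invented for illustration:
#
# starts = Starts(x=[0.0, 0.0], y=[0.0, 1.0], z=[0.0, 0.0])
# A streamtube trace then receives this object through its `starts` property,
# e.g. plotly.graph_objects.Streamtube(starts=starts, ...).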
| mit |
dnidever/noaosourcecatalog | python/parallax.py | 1 | 3927 | #!/usr/bin/env python
# Fit proper motion and parallax using ra/dec/mjd data
# Most of this code was taken from here:
# https://github.com/ctheissen/WISE_Parallaxes/blob/master/WISE_Parallax.py
import numpy as np
from astropy.table import Table, vstack, join
import matplotlib.pyplot as plt
from astropy import units as u
from scipy.optimize import curve_fit, minimize
from astropy.time import Time
import astropy.coordinates as coords
from dlnpyutils import utils as dln, coords as dcoords
# Set some constants
d2a = 3600.
d2ma = 3600000.
d2y = 1/365.25
def astrometryfunc(x, Delta1, Delta2, PMra, PMdec, pi):
""" Compute proper motion and parallax model for a set of ra/dec/mjd values."""
# x: input list of central RA and DEC positions and array of MJDs
# Delta1: initial dRA position
# Delta2: initial dDEC position
# PMra: proper motion in RA (arcsec/yr)
# PMdec: proper motion in DEC (arcsec/yr)
# pi: parallax (arcsec)
ra0, dec0, mjds = x
n = len(mjds)
years = (mjds - mjds[0])*d2y
ras = np.zeros(n,np.float64)+ra0
decs = np.zeros(n,np.float64)+dec0
bary = coords.get_body_barycentric('earth', Time(mjds, format='mjd'))
# Parallax factors
Fac1 = (bary.x * np.sin(ras*np.pi/180.) - bary.y * np.cos(ras*np.pi/180.) )
Fac2 = bary.x * np.cos(ras*np.pi/180.) * np.sin(decs*np.pi/180.) + \
bary.y * np.sin(ras*np.pi/180.) * np.sin(decs*np.pi/180.) - \
bary.z * np.cos(decs*np.pi/180.)
RAsend = Delta1 + PMra * years + pi * Fac1.value
DECsend = Delta2 + PMdec * years + pi * Fac2.value
return np.concatenate( [RAsend, DECsend]).flatten()
def fit(cat):
""" Fit proper motion and parallax to ra/dec/mjd data in a table."""
mjd = cat['mjd']
ra = cat['ra']
raerr = cat['raerr']
dec = cat['dec']
decerr = cat['decerr']
# Compute relative positions
cenra = np.mean(ra)
cendec = np.mean(dec)
lon,lat = dcoords.rotsphcen(ra,dec,cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Fit proper motion and parallax
pars, cov = curve_fit(astrometryfunc, [ra, dec, mjd] ,
np.concatenate( [lon,lat] ).flatten(),
sigma=np.concatenate( [ raerr, decerr ] ).flatten() )
return pars,cov
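# --- Editor's note: hedged usage sketch (added; not part of the original
# script). It builds a tiny synthetic catalog with the column names fit()
# expects; all numbers are invented (positions in degrees, errors in
# arcseconds, epochs in MJD).
def _example_fit():
    mjds = np.linspace(57000.0, 58000.0, 20)
    cat = Table({'mjd': mjds,
                 'ra': 180.0 + 1e-6 * (mjds - mjds[0]),
                 'dec': -30.0 + 2e-6 * (mjds - mjds[0]),
                 'raerr': np.full(mjds.size, 0.05),
                 'decerr': np.full(mjds.size, 0.05)})
    return fit(cat)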
def plotfit(cat,pars,cov,savefig=None):
""" Plot a figure of the data and the proper motion/parallax fit."""
plt.rcParams.update({'font.size': 12})
# Compute relative positions
cenra = np.mean(cat['ra'])
cendec = np.mean(cat['dec'])
lon,lat = dcoords.rotsphcen(cat['ra'],cat['dec'],cenra,cendec,gnomic=True)
lon *= d2a
lat *= d2a
# Array of MJDs for model curve
mjd = np.linspace(np.min(cat['mjd']),np.max(cat['mjd']),100)
out = astrometryfunc([cenra,cendec,mjd],pars[0],pars[1],pars[2],pars[3],pars[4])
ll = out[0:100]
bb = out[100:]
# Plot the model and data
plt.plot(ll,bb)
plt.errorbar(lon,lat,xerr=cat['raerr'],yerr=cat['decerr'],fmt='o',color='black',
markersize=5,ecolor='lightgray',elinewidth=2,linestyle='none',capsize=0)
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
xr = dln.minmax(np.concatenate((lon,ll)))
xr = [xr[0]-0.05*dln.valrange(xr),xr[1]+0.05*dln.valrange(xr)]
yr = dln.minmax(np.concatenate((lat,bb)))
yr = [yr[0]-0.05*dln.valrange(yr),yr[1]+0.05*dln.valrange(yr)]
plt.xlim(xr)
plt.ylim(yr)
perr = np.sqrt(np.diag(cov))
plt.annotate(r'$\mu_\alpha$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[2]*1e3,perr[2]*1e3) + '\n' +
r'$\mu_\delta$ = %5.3f $\pm$ %5.3f mas/yr' % (pars[3]*1e3,perr[3]*1e3) + '\n' +
r'$\pi$ = %5.3f $\pm$ %5.3f mas' % (pars[4]*1e3,perr[4]*1e3),
xy=(xr[0]+0.05*dln.valrange(xr),yr[1]-0.20*dln.valrange(yr)),ha='left')
if savefig is not None:
plt.savefig(savefig)
| mit |
ARM-software/lisa | lisa/analysis/frequency.py | 2 | 23565 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import itertools
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import memoized
from lisa.trace import requires_events, requires_one_event_of, CPU, MissingTraceEventError
from lisa.datautils import series_integrate, df_refit_index, series_refit_index, series_deduplicate, df_add_delta, series_mean, df_window
class FrequencyAnalysis(TraceAnalysisBase):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'frequency'
@requires_one_event_of('cpu_frequency', 'userspace@cpu_frequency_devlib')
def df_cpus_frequency(self, signals_init=True):
"""
Similar to ``trace.df_event('cpu_frequency')``, with
``userspace@cpu_frequency_devlib`` support.
        :param signals_init: If ``True``, an initial value for signals will be
            provided. This includes the initial value taken outside window
boundaries and devlib-provided events.
The ``userspace@cpu_frequency_devlib`` user event is merged in the dataframe if
it provides earlier values for a CPU.
"""
def rename(df):
return df.rename(
{
'cpu_id': 'cpu',
'state': 'frequency',
},
axis=1,
)
def check_empty(df, excep):
if df.empty:
raise excep
else:
return df
try:
df = self.trace.df_event('cpu_frequency', signals_init=signals_init)
except MissingTraceEventError as e:
excep = e
df = pd.DataFrame(columns=['cpu', 'frequency'])
else:
excep = None
df = rename(df)
if not signals_init:
return check_empty(df, excep)
try:
devlib_df = self.trace.df_event('userspace@cpu_frequency_devlib')
except MissingTraceEventError as e:
return check_empty(df, e)
else:
devlib_df = rename(devlib_df)
def groupby_cpu(df):
return df.groupby('cpu', observed=True, sort=False)
# Get the initial values for each CPU
def init_freq(df, from_devlib):
df = groupby_cpu(df).head(1).copy()
df['from_devlib'] = from_devlib
return df
init_df = init_freq(df, False)
init_devlib_df = init_freq(devlib_df, True)
# Get the first frequency for each CPU as given by devlib and cpufreq.
init_df = pd.concat([init_df, init_devlib_df])
init_df.sort_index(inplace=True)
# Get the first value for each CPU
first_df = groupby_cpu(init_df).head(1)
# Only keep the ones coming from devlib, as the other ones are already
# in the cpufreq df
first_df = first_df[first_df['from_devlib'] == True]
del first_df['from_devlib']
df = pd.concat([first_df, df])
df.sort_index(inplace=True)
df.index.name = 'Time'
return check_empty(df, None)
@df_cpus_frequency.used_events
def df_cpu_frequency(self, cpu, **kwargs):
"""
Same as :meth:`df_cpus_frequency` but for a single CPU.
:param cpu: CPU ID to get the frequency of.
:type cpu: int
:Variable keyword arguments: Forwarded to :meth:`df_cpus_frequency`.
"""
df = self.df_cpus_frequency(**kwargs)
return df[df['cpu'] == cpu]
@df_cpus_frequency.used_events
def _check_freq_domain_coherency(self, cpus=None):
"""
Check that all CPUs of a given frequency domain have the same frequency
transitions.
:param cpus: CPUs to take into account. All other CPUs are ignored.
If `None`, all CPUs will be checked.
:type cpus: list(int) or None
"""
domains = self.trace.plat_info['freq-domains']
if cpus is None:
cpus = list(itertools.chain.from_iterable(domains))
if len(cpus) < 2:
return
df = self.df_cpus_frequency()
for domain in domains:
# restrict the domain to what we care. Other CPUs may have garbage
# data, but the caller is not going to look at it anyway.
domain = set(domain) & set(cpus)
if len(domain) < 2:
continue
# Get the frequency column for each CPU in the domain
freq_columns = [
# drop the index since we only care about the transitions, and
# not when they happened
df[df['cpu'] == cpu]['frequency'].reset_index(drop=True)
for cpu in domain
]
# Check that all columns are equal. If they are not, that means that
# at least one CPU has a frequency transition that is different
# from another one in the same domain, which is highly suspicious
ref = freq_columns[0]
for col in freq_columns:
# If the trace started in the middle of a group of transitions,
# ignore that transition by shifting and re-test
if not (ref.equals(col) or ref[:-1].equals(col.shift()[1:])):
raise ValueError(f'Frequencies of CPUs in the freq domain {cpus} are not coherent')
@TraceAnalysisBase.cache
@df_cpus_frequency.used_events
@requires_events('cpu_idle')
def _get_frequency_residency(self, cpus):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cpus: A tuple of CPU IDs
:type cpus: tuple(int)
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
freq_df = self.df_cpus_frequency()
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU.
self._check_freq_domain_coherency(cpus)
cluster_freqs = freq_df[freq_df.cpu == cpus[0]]
# Compute TOTAL Time
cluster_freqs = df_add_delta(cluster_freqs, col="total_time", window=self.trace.window)
time_df = cluster_freqs[["total_time", "frequency"]].groupby('frequency', observed=True, sort=False).sum()
# Compute ACTIVE Time
cluster_active = self.trace.analysis.idle.signal_cluster_active(cpus)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
cluster_freqs = cluster_freqs.join(
cluster_active.to_frame(name='active'), how='outer')
cluster_freqs.fillna(method='ffill', inplace=True)
# Compute total time by integrating the square wave
time_df['active_time'] = pd.Series({
freq: series_integrate(
cluster_freqs['active'] * (cluster_freqs['frequency'] == freq)
)
for freq in cluster_freqs['frequency'].unique()
})
return time_df
@_get_frequency_residency.used_events
def df_cpu_frequency_residency(self, cpu):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
if not isinstance(cpu, int):
raise TypeError('Input CPU parameter must be an integer')
return self._get_frequency_residency((cpu,))
@_get_frequency_residency.used_events
def df_domain_frequency_residency(self, cpu):
"""
        Get per-frequency-domain frequency residency, i.e. the amount of time
        each domain spent at each frequency.
:param cpu: Any CPU of the domain to analyse
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``total_time`` column (the total time spent at a frequency)
* A ``active_time`` column (the non-idle time spent at a frequency)
"""
domains = [
domain
for domain in self.trace.plat_info['freq-domains']
if cpu in domain
]
if not domains:
raise ValueError(f'The given CPU "{cpu}" does not belong to any domain')
else:
domain, = domains
return self._get_frequency_residency(tuple(domain))
@TraceAnalysisBase.cache
@df_cpu_frequency.used_events
def df_cpu_frequency_transitions(self, cpu):
"""
Compute number of frequency transitions of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions)
"""
freq_df = self.df_cpu_frequency(cpu, signals_init=False)
# Since we want to count the number of events appearing inside the
# window, make sure we don't get anything outside it
freq_df = df_window(
freq_df,
window=self.trace.window,
method='exclusive',
clip_window=False,
)
cpu_freqs = freq_df['frequency']
# Remove possible duplicates (example: when devlib sets trace markers
# a cpu_frequency event is triggered that can generate a duplicate)
cpu_freqs = series_deduplicate(cpu_freqs, keep='first', consecutives=True)
transitions = cpu_freqs.value_counts()
transitions.name = "transitions"
transitions.sort_index(inplace=True)
return pd.DataFrame(transitions)
@TraceAnalysisBase.cache
@df_cpu_frequency_transitions.used_events
def df_cpu_frequency_transition_rate(self, cpu):
"""
Compute frequency transition rate of a given CPU.
:param cpu: a CPU ID
:type cpu: int
:returns: A :class:`pandas.DataFrame` with:
* A ``transitions`` column (the number of frequency transitions per second)
"""
transitions = self.df_cpu_frequency_transitions(cpu)['transitions']
return pd.DataFrame(dict(
transitions=transitions / self.trace.time_range,
))
@df_cpu_frequency.used_events
def get_average_cpu_frequency(self, cpu):
"""
Get the average frequency for a given CPU
:param cpu: The CPU to analyse
:type cpu: int
"""
df = self.df_cpu_frequency(cpu)
freq = series_refit_index(df['frequency'], window=self.trace.window)
return series_mean(freq)
@TraceAnalysisBase.cache
@requires_events('clock_set_rate', 'clock_enable', 'clock_disable')
def df_peripheral_clock_effective_rate(self, clk_name):
rate_df = self.trace.df_event('clock_set_rate')
enable_df = self.trace.df_event('clock_enable')
disable_df = self.trace.df_event('clock_disable')
freq = rate_df[rate_df.clk_name == clk_name]
enables = enable_df[enable_df.clk_name == clk_name]
disables = disable_df[disable_df.clk_name == clk_name]
freq = pd.concat([freq, enables, disables], sort=False).sort_index()
freq['start'] = freq.index
freq['len'] = (freq.start - freq.start.shift()).fillna(0).shift(-1)
# The last value will be NaN, fix to be appropriate length
freq.loc[freq.index[-1], 'len'] = self.trace.end - freq.index[-1]
freq.ffill(inplace=True)
freq['effective_rate'] = np.where(
freq['state'] == 0, 0,
np.where(freq['state'] == 1, freq['state'], float('nan'))
)
return freq
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method(return_axis=True)
@df_peripheral_clock_effective_rate.used_events
def plot_peripheral_clock(self, clk, axis=None, **kwargs):
"""
Plot the frequency of a particular peripheral clock
:param clk: The clk name to chart
:type clk: str
"""
logger = self.get_logger()
window = self.trace.window
start, end = window
def plotter(axis, local_fig):
freq_axis, state_axis = axis
freq_axis.get_figure().suptitle('Peripheral frequency', y=.97, fontsize=16, horizontalalignment='center')
freq = self.df_peripheral_clock_effective_rate(clk)
freq = df_refit_index(freq, window=window)
# Plot frequency information (set rate)
freq_axis.set_title("Clock frequency for " + clk)
set_rate = freq['state'].dropna()
rate_axis_lib = 0
if len(set_rate) > 0:
rate_axis_lib = set_rate.max()
set_rate.plot(style=['b--'], ax=freq_axis, drawstyle='steps-post', alpha=0.4, label="clock_set_rate value")
freq_axis.hlines(set_rate.iloc[-1], set_rate.index[-1], end, linestyle='--', color='b', alpha=0.4)
else:
logger.warning('No clock_set_rate events to plot')
# Plot frequency information (effective rate)
eff_rate = freq['effective_rate'].dropna()
eff_rate = series_refit_index(eff_rate, window=window)
if len(eff_rate) > 0 and eff_rate.max() > 0:
rate_axis_lib = max(rate_axis_lib, eff_rate.max())
eff_rate.plot(style=['b-'], ax=freq_axis, drawstyle='steps-post', alpha=1.0, label="Effective rate (with on/off)")
freq_axis.hlines(eff_rate.iloc[-1], eff_rate.index[-1], end, linestyle='-', color='b', alpha=1.0)
else:
logger.warning('No effective frequency events to plot')
freq_axis.set_ylim(0, rate_axis_lib * 1.1)
freq_axis.set_xlabel('')
freq_axis.grid(True)
freq_axis.legend()
def mhz(x, pos):
return '{:1.2f} MHz'.format(x * 1e-6)
freq_axis.get_yaxis().set_major_formatter(FuncFormatter(mhz))
on = freq[freq.state == 1]
state_axis.hlines([0] * len(on),
on['start'], on['start'] + on['len'],
linewidth=10.0, label='clock on', color='green')
off = freq[freq.state == 0]
state_axis.hlines([0] * len(off),
off['start'], off['start'] + off['len'],
linewidth=10.0, label='clock off', color='red')
            # Plot the time period during which the clock state was unknown from the trace
indeterminate = pd.concat([on, off]).sort_index()
if indeterminate.empty:
indet_range_max = end
else:
indet_range_max = indeterminate.index[0]
state_axis.hlines(0, 0, indet_range_max, linewidth=1.0, label='indeterminate clock state', linestyle='--')
state_axis.legend(bbox_to_anchor=(0., 1.02, 1., 0.102), loc=3, ncol=3, mode='expand')
state_axis.set_yticks([])
state_axis.set_xlabel('seconds')
state_axis.set_xlim(start, end)
return self.do_plot(plotter, height=8, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency.used_events
def plot_cpu_frequencies(self, cpu: CPU, axis, local_fig, average: bool=True):
"""
Plot frequency for the specified CPU
:param cpu: The CPU for which to plot frequencies
:type cpus: int
:param average: If ``True``, add a horizontal line which is the
frequency average.
:type average: bool
If ``sched_overutilized`` events are available, the plots will also
show the intervals of time where the system was overutilized.
"""
logger = self.get_logger()
df = self.df_cpu_frequency(cpu)
if "freqs" in self.trace.plat_info:
frequencies = self.trace.plat_info['freqs'][cpu]
else:
logger.info(f"Estimating CPU{cpu} frequencies from trace")
frequencies = sorted(list(df.frequency.unique()))
logger.debug(f"Estimated frequencies: {frequencies}")
avg = self.get_average_cpu_frequency(cpu)
logger.info(
"Average frequency for CPU{} : {:.3f} GHz".format(cpu, avg / 1e6))
df = df_refit_index(df, window=self.trace.window)
df['frequency'].plot(ax=axis, drawstyle='steps-post')
if average and avg > 0:
axis.axhline(avg, color=self.get_next_color(axis), linestyle='--',
label="average")
plot_overutilized = self.trace.analysis.status.plot_overutilized
if self.trace.has_events(plot_overutilized.used_events):
plot_overutilized(axis=axis)
axis.set_ylabel('Frequency (Hz)')
axis.set_ylim(frequencies[0] * 0.9, frequencies[-1] * 1.1)
axis.legend()
if local_fig:
axis.set_xlabel('Time')
axis.set_title(f'Frequency of CPU{cpu}')
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequencies.used_events
def plot_domain_frequencies(self, axis=None, **kwargs):
"""
Plot frequency trend for all frequency domains.
If ``sched_overutilized`` events are available, the plots will also show
the intervals of time where the cluster was overutilized.
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
axis = axes[idx] if len(domains) > 1 else axes
self.plot_cpu_frequencies(domain[0], axis=axis)
axis.set_title(f'Frequencies of CPUS {domain}')
return self.do_plot(plotter, nrows=len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@df_cpu_frequency_residency.used_events
def plot_cpu_frequency_residency(self, cpu: CPU, pct: bool=False, axis=None, **kwargs):
"""
Plot per-CPU frequency residency.
:param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot residencies in percentage
:type pct: bool
"""
residency_df = self.df_cpu_frequency_residency(cpu)
total_df = residency_df.total_time
active_df = residency_df.active_time
if pct:
total_df = total_df * 100 / total_df.sum()
active_df = active_df * 100 / active_df.sum()
def plotter(axes, local_fig):
total_df.plot.barh(ax=axes[0])
axes[0].set_title(f"CPU{cpu} total frequency residency")
active_df.plot.barh(ax=axes[1])
axes[1].set_title(f"CPU{cpu} active frequency residency")
for axis in axes:
if pct:
axis.set_xlabel("Time share (%)")
else:
axis.set_xlabel("Time (s)")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
return self.do_plot(plotter, nrows=2, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_residency.used_events
def plot_domain_frequency_residency(self, pct: bool=False, axis=None, **kwargs):
"""
Plot the frequency residency for all frequency domains.
:param pct: Plot residencies in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for idx, domain in enumerate(domains):
local_axes = axes[2 * idx: 2 * (idx + 1)]
self.plot_cpu_frequency_residency(domain[0],
pct=pct,
axis=local_axes,
)
for axis in local_axes:
title = axis.get_title()
axis.set_title(title.replace(f'CPU{domain[0]}', f"CPUs {domain}"))
return self.do_plot(plotter, nrows=2 * len(domains), sharex=True, axis=axis, **kwargs)
@TraceAnalysisBase.plot_method()
@df_cpu_frequency_transitions.used_events
def plot_cpu_frequency_transitions(self, cpu: CPU, axis, local_fig, pct: bool=False):
"""
Plot frequency transitions count of the specified CPU
        :param cpu: The CPU to generate the plot for
:type cpu: int
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
df = self.df_cpu_frequency_transitions(cpu)
if pct:
df = df * 100 / df.sum()
if not df.empty:
df["transitions"].plot.barh(ax=axis)
axis.set_title(f'Frequency transitions of CPU{cpu}')
if pct:
axis.set_xlabel("Transitions share (%)")
else:
axis.set_xlabel("Transition count")
axis.set_ylabel("Frequency (Hz)")
axis.grid(True)
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cpu_frequency_transitions.used_events
def plot_domain_frequency_transitions(self, pct: bool=False, axis=None, **kwargs):
"""
Plot frequency transitions count for all frequency domains
:param pct: Plot frequency transitions in percentage
:type pct: bool
"""
domains = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for domain, axis in zip(domains, axes):
self.plot_cpu_frequency_transitions(
cpu=domain[0],
pct=pct,
axis=axis,
)
title = axis.get_title()
axis.set_title(title.replace(f'CPU{domain[0]}', f"CPUs {domain}"))
return self.do_plot(plotter, nrows=len(domains), axis=axis, **kwargs)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
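# --- Editor's note: hedged usage sketch (added; not part of the original module).
# The trace path and event list are invented for illustration:
#
# from lisa.trace import Trace
# trace = Trace('trace.dat', events=['cpu_frequency', 'cpu_idle'])
# freq_df = trace.analysis.frequency.df_cpus_frequency()
# trace.analysis.frequency.plot_domain_frequencies()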
| apache-2.0 |
oscar6echo/ezhc | ezhc/build.py | 1 | 14543 |
import pandas as pd
DEFAULT_COLORS = ["#7cb5ec", "#434348", "#90ed7d", "#f7a35c", "#8085e9",
"#f15c80", "#e4d354", "#2b908f", "#f45b5b", "#91e8e1"]
def series(df, *args, **kwargs):
idx = df.index
col = df.columns
data = df.values
assert(isinstance(idx, pd.core.index.Index))
series = []
for k, c in enumerate(col):
if df[c].dtype.kind in 'fib':
v = data[:, k]
sec = c in kwargs.get('secondary_y', [])
d = {
'name': c if not sec else c + ' (right)',
'yAxis': int(sec),
'data': [[idx[q], v[q]] for q in range(len(v))],
}
if c in kwargs.get('color', []):
d['color'] = kwargs['color'].get(c)
if c in kwargs.get('visible', []):
d['visible'] = kwargs['visible'].get(c)
if c in kwargs.get('fillColor', []):
d['type'] = 'area'
d['fillColor'] = kwargs['fillColor'].get(c)
if c in kwargs.get('lineColor', []):
d['lineColor'] = kwargs['lineColor'].get(c)
if kwargs.get('dashStyle', []):
d['dashStyle'] = kwargs['dashStyle'].get(c, 'Solid')
series.append(d)
return series
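# --- Editor's note: hedged usage sketch (added; not part of the original
# module). It shows the payload produced by series() for a toy DataFrame; the
# column name and colour are invented for illustration.
def _example_series():
    df = pd.DataFrame({'price': [1.0, 1.2, 0.9]})
    # -> [{'name': 'price', 'yAxis': 0, 'data': [[0, 1.0], [1, 1.2], [2, 0.9]],
    #      'color': '#7cb5ec'}]
    return series(df, color={'price': '#7cb5ec'})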
def series_range(df, *args, **kwargs):
idx = df.index
col = df.columns
data = df.values
assert(isinstance(idx, pd.core.index.Index))
assert(len(col) == 2)
assert(df[col[0]].dtype.kind in 'if')
assert(df[col[1]].dtype.kind in 'if')
series = [{
'data': [[data[q, 0], data[q, 1]] for q in range(data.shape[0])],
}]
if 'color' in kwargs:
        # 'series' is a list with a single dict; set the colour on that dict
        series[0]['color'] = kwargs['color']
axis_categories = list(idx)
return axis_categories, series
def series_drilldown_orig(df, colorByPoint=True, pointPlacement='on', *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
assert(len(idx.levshape) == 2)
for c in col:
assert df[c].dtype.kind in 'if'
levone = list(idx.levels[0])
data = []
series = []
drilldownSeries = []
for co in col:
data = []
for c in levone:
dfs = df.xs(c)
ii = dfs[[co]].index.values.flatten()
vv = dfs[[co]].values.flatten()
d1 = {
'name': c,
'y': dfs[[co]].sum().values[0],
'drilldown': c + ' - ' + co if len(dfs) > 1 else None,
'pointPlacement': pointPlacement
}
if co in kwargs.get('color', []):
d1['color'] = kwargs['color'].get(co)
data.append(d1)
if len(dfs) > 1:
d2 = {
'name': c + ' - ' + co,
'data': [[str(ii[q]), vv[q]] for q in range(len(ii))],
'id': c + ' - ' + co,
'pointPlacement': pointPlacement
}
drilldownSeries.append(d2)
s = {'name': co, 'data': data, 'colorByPoint': colorByPoint}
if co in kwargs.get('color', []):
s['color'] = kwargs['color'].get(co)
series.append(s)
return series, drilldownSeries
def series_drilldown(df,
top_name='Top',
colorByPoint=True,
pointPlacement='on',
set_color=False,
colors=None,
set_precision=False,
precision=None,
*args,
**kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
for c in col:
assert df[c].dtype.kind in 'if'
class DrillDownBuilder:
"""
Recursive build of drilldown structure
"""
def __init__(self,
df,
top_name='Top',
colorByPoint=True,
pointPlacement='on',
set_color=False,
colors=None,
set_precision=False,
precision=None):
self.top_name = top_name
self.colorByPoint = colorByPoint
self.pointPlacement = pointPlacement
self.df = df
self.set_color = set_color
self.colors = colors
self.set_precision = set_precision
self.precision = precision
self.items = []
self.top_item = self.build(df)
def build(self, dfxs, parent=None, name=None):
top_items = []
for col in dfxs.columns:
dfc = dfxs[[col]]
item = {
'id': col + '-toplevel',
'name': dfc.columns[0]
}
if len(dfxs.columns) == 1:
item['name'] = self.top_name
if parent:
item['id'] = str(parent) + '-' + name
item['name'] = name
if self.colorByPoint:
item['colorByPoint'] = self.colorByPoint
data = []
idx = dfc.index
if isinstance(idx, pd.MultiIndex):
for k, idx0 in enumerate(idx.levels[0]):
df_sub = dfc.xs(idx0)
total = df_sub.sum()[0]
d = {
'name': str(idx0),
'y': total,
'drilldown': item['id'] + '-' + str(idx0)
}
if self.pointPlacement:
d['pointPlacement'] = self.pointPlacement
if self.set_precision:
d['y'] = round(total, self.precision)
if self.set_color:
d['color'] = self.colors[k % len(self.colors)]
data.append(d)
self.build(df_sub,
parent=item['id'],
name=str(idx0))
elif isinstance(idx, pd.Index):
for k, idx0 in enumerate(idx):
df_sub = dfc.xs(idx0)
total = df_sub.sum()
d = {
'name': str(idx0),
'y': total,
}
if self.set_precision:
d['y'] = round(total, self.precision)
if self.set_color:
d['color'] = self.colors[k % len(self.colors)]
data.append(d)
else:
raise Exception('Pandas Index Unknown Problem')
item['data'] = data
self.items.append(item)
top_items.append(item)
return top_items
def series(self):
return self.top_item
def drilldown_series(self):
return self.items
dd = DrillDownBuilder(df,
top_name=top_name,
colorByPoint=colorByPoint,
pointPlacement=pointPlacement,
set_color=set_color,
colors=colors,
set_precision=set_precision,
precision=precision)
return dd.series(), dd.drilldown_series()
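# --- Editor's note: hedged usage sketch (added; not part of the original
# module). A two-level MultiIndex frame is turned into top-level series plus
# the matching drilldown series; labels and values are invented.
def _example_series_drilldown():
    idx = pd.MultiIndex.from_tuples([('A', 'a1'), ('A', 'a2'), ('B', 'b1')],
                                    names=['group', 'item'])
    df = pd.DataFrame({'value': [1.0, 2.0, 3.0]}, index=idx)
    top, drilldown = series_drilldown(df)
    return top, drilldown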
# def series_drilldown_orig(df, colorByPoint=True, pointPlacement='on', *args, **kwargs):
# idx = df.index
# col = df.columns
# assert(isinstance(idx, pd.core.index.MultiIndex))
# assert(len(idx.levshape) == 2)
# for c in col:
# assert df[c].dtype.kind in 'if'
# levone = list(idx.levels[0])
# data = []
# series = []
# drilldownSeries = []
# for co in col:
# data = []
# for c in levone:
# dfs = df.xs(c)
# ii = dfs[[co]].index.values.flatten()
# vv = dfs[[co]].values.flatten()
# d1 = {
# 'name': c,
# 'y': dfs[[co]].sum().values[0],
# 'drilldown': c + ' - ' + co if len(dfs) > 1 else None,
# 'pointPlacement': pointPlacement
# }
# if co in kwargs.get('color', []):
# d1['color'] = kwargs['color'].get(co)
# data.append(d1)
# if len(dfs) > 1:
# d2 = {
# 'name': c + ' - ' + co,
# 'data': [[str(ii[q]), vv[q]] for q in range(len(ii))],
# 'id': c + ' - ' + co,
# 'pointPlacement': pointPlacement
# }
# drilldownSeries.append(d2)
# s = {'name': co, 'data': data, 'colorByPoint': colorByPoint}
# if co in kwargs.get('color', []):
# s['color'] = kwargs['color'].get(co)
# series.append(s)
# return series, drilldownSeries
def series_scatter(df, color_column=None, title_column=None, *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
assert(len(idx.levshape) == 2)
assert(len(col) <= 2)
assert(df[color_column].dtype.kind in 'iO')
if color_column == None:
assert(len(col) == 1)
color_column = col[0]
if title_column == None:
title_column = color_column
data = df[[color_column]].values.flatten()
elmt = list(set(data))
color = kwargs.get('color', {})
series = []
for e in elmt:
dfs = df[df[color_column] == e]
idx = dfs.index
names = list(dfs[title_column])
series.append({'animation': False,
'name': e,
'turboThreshold': 0,
'color': color.get(e, None),
'data': [{'x': idx[k][0], 'y': idx[k][1], 'name': str(names[k])}
for k in range(len(dfs))],
})
return series
def series_bubble(df, *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
assert(len(idx.levshape) == 3)
assert(len(col) == 1)
assert(df[col[0]].dtype.kind in 'fib')
names = list(idx.levels[0])
series = []
for s in names:
dfs = df.xs(s)
v = dfs.values.flatten()
idxs = dfs.index
d = {
'name': s,
'data': [[idxs[q][0], idxs[q][1], v[q]] for q in range(len(v))],
}
if s in kwargs.get('color', []):
d['color'] = kwargs['color'].get(s)
series.append(d)
return series
def series_heatmap(df, *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.Index))
for c in col:
assert(df[c].dtype.kind in 'if')
dft = df.copy()
dft.index = range(len(df.index))
dft.columns = range(len(df.columns))
res_data = list(dft.stack().swaplevel(0, 1).reset_index().values)
res_idx = list(df.columns)
res_col = list(df.index)
return res_idx, res_col, res_data
def series_tree(df,
set_total=False,
name_total='All',
set_color=False,
colors=DEFAULT_COLORS,
set_value=True,
precision=2):
class TreeBuilder:
"""
Recursive build of tree data structure
"""
def __init__(self,
df,
set_total,
name_total,
set_color,
colors,
set_value,
precision):
self.df = df
self.set_color = set_color
self.colors = colors
self.set_value = set_value
self.precision = precision
self.points = []
if set_total:
point = {'id': '0',
'name': name_total,
}
if set_value:
point['value'] = round(df.sum().sum(), precision)
self.points.append(point)
total = self.build(df, parent='0')
else:
total = self.build(df)
def build(self, dfxs, parent=None):
idx = dfxs.index
sum_value = 0
if isinstance(idx, pd.MultiIndex):
for k, name in enumerate(idx.levels[0]):
point = {
'name': name,
}
if parent:
point['id'] = parent + '.' + str(k)
point['parent'] = parent
else:
point['id'] = '0.' + str(k)
if self.set_color:
point['color'] = self.colors[k % len(self.colors)]
df_sub = dfxs.xs(name)
df_sub = df_sub.reset_index().set_index(df_sub.index.names)
value = self.build(df_sub,
parent=point.get('id'))
sum_value += value
if self.set_value:
point['value'] = round(value, self.precision)
self.points.append(point)
elif isinstance(idx, pd.Index):
for k, name in enumerate(idx):
value = dfxs.loc[name][0]
sum_value += value
point = {
'name': name,
'value': round(value, self.precision)
}
if parent:
point['id'] = parent + '.' + str(k)
point['parent'] = parent
else:
point['id'] = 'id_' + str(k)
point['color'] = self.colors[k % len(self.colors)]
self.points.append(point)
else:
raise Exception('Pandas Index Unknown Problem')
return sum_value
col = df.columns
assert(len(col) == 1)
tb = TreeBuilder(df,
set_total,
name_total,
set_color,
colors,
set_value,
precision)
return tb.points
| mit |
tejasckulkarni/hydrology | ch_623/ch_623_daily.py | 1 | 21589 | __author__ = 'kiruba'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import checkdam.checkdam as cd
from matplotlib import rc
import matplotlib.cm as cmx
import matplotlib.colors as colors
from datetime import timedelta
import math
import ccy_classic_lstsqr
# latex parameters
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=18)
"""
Variables
"""
full_stage = 1.41 # check dam height, above this it is assumed check dam will overflow
date_format = '%Y-%m-%d %H:%M:%S'
daily_format = '%Y-%m-%d'
resolution_ody = 0.0008
stage_cutoff = 0.1
# ------------------------------------------------------------------#
# Weather file
weather_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/hadonahalli/corrected_weather_ws.csv'
# Rain file
rain_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/hadonahalli/ksndmc_rain.csv'
# convert to pandas dataframe
weather_df = pd.read_csv(weather_file, sep=',', header=0)
# print weather_df.head()
weather_df['Date_Time'] = pd.to_datetime(weather_df['Date_Time'], format=date_format)
weather_df.set_index(weather_df['Date_Time'], inplace=True)
weather_df.sort_index(inplace=True)
weather_df = weather_df.drop('Date_Time', 1)
# Rain data frame
rain_df = pd.read_csv(rain_file, sep=',', header=0)
# set index
rain_df['Date_Time'] = pd.to_datetime(rain_df['Date_Time'], format=date_format)
rain_df.set_index(rain_df['Date_Time'], inplace=True)
# sort based on index
rain_df.sort_index(inplace=True)
# drop date time column
rain_df = rain_df.drop('Date_Time', 1)
"""
Check dam calibration
"""
y_cal_1 = np.array([100, 1000, 2000, 3000, 4000, 5000])
x_cal_1 = np.array([1894, 2563, 3298, 4049, 4794, 5548])
a_stage_1 = cd.polyfit(x_cal_1, y_cal_1, 1)
coeff_cal_1 = a_stage_1['polynomial']
slope_1 = coeff_cal_1[0]
intercept_1 = coeff_cal_1[1]
y_cal = np.array([100, 1000, 2000, 3000, 4000, 5000])
x_cal = np.array([1864, 2540, 3313, 4078, 4835, 5582])
a_stage = cd.polyfit(x_cal, y_cal, 1)
coeff_cal = a_stage['polynomial']
slope = coeff_cal[0]
intercept = coeff_cal[1]
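# --- Editor's note (added): each calibration block above fits a straight line,
# stage(mm) = slope * raw_reading + intercept, through the logger calibration
# points via cd.polyfit. Sanity check with made-up numbers: raw = [2000, 3000,
# 4000] against stage = [500, 1500, 2500] gives slope = 1.0 and
# intercept = -1500, i.e. stage = raw - 1500.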
"""
Read Check dam data
"""
block_1 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/3075/3075_012_001.CSV'
water_level_1 = cd.read_correct_ch_dam_data(block_1, slope_1, intercept_1)
block_2 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/3075/3075_012_00325_8_14.CSV'
water_level_2 = cd.read_correct_ch_dam_data(block_2, slope_1, intercept_1)
# block_3 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2976/2976_019_001.CSV'
# water_level_3 = cd.read_correct_ch_dam_data(block_3, slope, intercept)
# block_4 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2976/2976_019_002_03_12_2014.CSV'
# water_level_4 = cd.read_correct_ch_dam_data(block_4, slope, intercept)
# block_5 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2976/2976_019_003_11_12_2014.CSV'
# water_level_5 = cd.read_correct_ch_dam_data(block_5, slope, intercept)
# block_6 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2976/2976_019_004_23_12_2014.CSV'
# water_level_6 = cd.read_correct_ch_dam_data(block_6, slope, intercept)
# block_7 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2976/2976_019_005_03_01_2015.CSV'
# water_level_7 = cd.read_correct_ch_dam_data(block_7, slope, intercept)
# Take data up to July 17, reason: mouse bite
water_level_2 = water_level_2[:'2014-07-17']
# print water_level_2.tail()
for i in range(1, 3, 1):
eval("water_level_{0}.drop(water_level_{0}.tail(1).index, inplace=True, axis=0)".format(i))
eval("water_level_{0}.drop(water_level_{0}.head(1).index, inplace=True, axis=0)".format(i))
for i in range(1, 3, 1):
print "water_level_{0}".format(i)
print eval("water_level_{0}.head()".format(i))
fig = plt.figure()
for i in range(1, 3, 1):
x = eval("water_level_{0}.index".format(i))
y = eval("water_level_{0}['stage(m)']".format(i))
plt.plot(x, y)
plt.show()
# raise SystemExit(0)
water_level_30min = pd.concat([water_level_1, water_level_2], axis=0)
water_level_30min = water_level_30min.sort()
rounded = np.array(water_level_30min.index, dtype='datetime64[m]')
water_level_30min = water_level_30min.set_index(rounded)
start_time_30 = min(water_level_30min.index)
end_time_30 = max(water_level_30min.index)
# new_index_30min = pd.date_range(start=start_time_30.strftime('%Y-%m-%d %H:%M'), end=end_time_30.strftime('%Y-%m-%d %H:%M'), freq='30min')
new_index_30 = pd.date_range(start=start_time_30, end=end_time_30, freq='30min')
water_level_30min = water_level_30min.reindex(new_index_30, method=None)
water_level_30min = water_level_30min.interpolate(method='time')
# water_level_30min = water_level_30min.set_index(new_index_30min)
water_level_30min.index.name = 'Date'
# print water_level_30min.head()
# raise SystemExit(0)
# water_level_10min = pd.concat([water_level_6, water_level_7, water_level_8, water_level_9, water_level_10, water_level_11, water_level_12], axis=0)
# water_level_10min = water_level_10min.sort()
# rounded = np.array(water_level_10min.index, dtype='datetime64[m]')
# water_level_10min = water_level_10min.set_index(rounded)
# start_time_10 = min(water_level_10min.index)
# end_time_10 = max(water_level_10min.index)
# # new_index_10min = pd.date_range(start=start_time_10.strftime('%Y-%m-%d %H:%M'), end=end_time_10.strftime('%Y-%m-%d %H:%M'), freq='10min')
# new_index_10 = pd.date_range(start=start_time_10, end=end_time_10, freq='10min')
# water_level_10min = water_level_10min.reindex(new_index_10, method=None)
# water_level_10min = water_level_10min.interpolate(method='time')
# # water_level_10min = water_level_10min.set_index(new_index_10min)
# water_level_10min.index.name = 'Date'
#
# water_level = pd.concat([water_level_30min, water_level_10min], axis=0)
# water_level = water_level.resample('30min', how=np.mean, label='right', closed='right')
water_level = water_level_30min
water_level.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_623/stage_623.csv')
# raise SystemExit(0)
"""
Join weather and rain data
"""
weather_df = weather_df.join(rain_df, how='right')
weather_df = weather_df[min(water_level.index).strftime(daily_format): max(water_level.index).strftime(daily_format)]
weather_df = weather_df.join(water_level, how='right')
# fig, ax1 = plt.subplots()
# ax1.bar(weather_df.index, weather_df['Rain Collection (mm)'], 0.35, color='b')
# plt.gca().invert_yaxis()
# ax2 = ax1.twinx()
# ax2.plot(weather_df.index, weather_df['stage(m)'], '-b')
# plt.hlines(y=stage_cutoff, xmin=min(weather_df.index), xmax=max(weather_df.index))
# plt.hlines(y=full_stage, xmin=min(weather_df.index), xmax=max(weather_df.index), colors='g')
# plt.show()
# raise SystemExit(0)
"""
Remove Duplicates
"""
weather_df['index'] = weather_df.index
weather_df.drop_duplicates(subset='index', take_last=True, inplace=True)
del weather_df['index']
weather_df = weather_df.sort()
# print weather_df.head()
"""
Open water evaporation
"""
z = 822
p = (1 - (2.25577 * (10 ** -5) * z))
air_p_pa = 101325 * (p ** 5.25588)
# give air pressure value
weather_df['AirPr(Pa)'] = air_p_pa
"""
Half hourly Extraterrestrial Radiation Calculation(J/m2/30min)
"""
sc_default = 1367.0 # Solar constant in W/m^2 is 1367.0.
ch_623_lat = 13.353
ch_623_long = 77.549
weather_df['Rext (MJ/m2/30min)'] = 0.000
for i in weather_df.index:
weather_df['Rext (MJ/m2/30min)'][i.strftime(date_format)] = (cd.extraterrestrial_irrad(local_datetime=i,
latitude_deg=ch_623_lat,
longitude_deg=ch_623_long))
"""
wind speed from km/h to m/s
1 kmph = 0.277778 m/s
"""
# weather_df['Wind Speed (mps)'] = weather_df['Wind Speed (kmph)'] * 0.277778
"""
Radiation unit conversion
"""
weather_df['Solar Radiation (MJ/m2/30min)'] = (weather_df['Solar Radiation (Wpm2)'] * 1800) / (10 ** 6)
"""
Average Temperature Calculation
"""
# weather_df['Average Temp (C)'] = 0.5 * (weather_df['Min Air Temperature (C)'] + weather_df['Max Air Temperature (C)'])
# print weather_df.head()
# raise SystemExit(0)
"""
Half hourly Evaporation calculation
"""
airtemp = weather_df['Air Temperature (C)']
hum = weather_df['Humidity (%)']
airpress = weather_df['AirPr(Pa)']
rs = weather_df['Solar Radiation (MJ/m2/30min)']
rext = weather_df['Rext (MJ/m2/30min)']
wind_speed = weather_df['Wind Speed (mps)']
weather_df['Evaporation (mm/30min)'] = cd.half_hour_evaporation(airtemp=airtemp, rh=hum, airpress=airpress,
rs=rs, rext=rext, u=wind_speed, z=z)
"""
Select data where stage is available
"""
weather_stage_avl_df = weather_df[min(water_level.index):max(water_level.index)]
"""
Convert observed stage to volume by linear interpolation
"""
# set stage as index
stage_vol_df = pd.read_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_623/stage_vol.csv',
sep=',', header=0, names=['sno', 'stage_m', 'total_vol_cu_m'])
# print stage_vol_df
stage_vol_df.drop('sno', inplace=True, axis=1)
stage_vol_df.set_index(stage_vol_df['stage_m'], inplace=True)
water_balance_df = weather_stage_avl_df[['rain (mm)', 'Evaporation (mm/30min)', 'stage(m)']]
water_balance_df['volume (cu.m)'] = 0.000
for index, row in water_balance_df.iterrows():
obs_stage = row['stage(m)'] # observed stage
if obs_stage >= stage_cutoff:
x1, x2 = cd.find_range(stage_vol_df['stage_m'].tolist(), obs_stage)
x_diff = x2 - x1
y1 = stage_vol_df['total_vol_cu_m'][x1]
y2 = stage_vol_df['total_vol_cu_m'][x2]
y_diff = y2 - y1
slope = y_diff / x_diff
y_intercept = y2 - (slope * x2)
water_balance_df['volume (cu.m)'][index.strftime(date_format)] = (slope * obs_stage) + y_intercept
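# The stage-to-volume conversion above and the stage-to-area conversions below
# all repeat the same two-point linear interpolation on a rating curve. A small
# helper such as this illustrative (unused) sketch could replace those blocks;
# the name interpolate_from_curve is hypothetical.
def interpolate_from_curve(lookup_df, x_column, y_column, x_value):
    """Linearly interpolate y at x_value between the two bracketing points of a rating curve."""
    x_low, x_high = cd.find_range(lookup_df[x_column].tolist(), x_value)
    y_low = lookup_df[y_column][x_low]
    y_high = lookup_df[y_column][x_high]
    curve_slope = (y_high - y_low) / (x_high - x_low)
    curve_intercept = y_high - (curve_slope * x_high)
    return (curve_slope * x_value) + curve_intercept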
"""
full volume calculation
"""
x1, x2 = cd.find_range(stage_vol_df['stage_m'].tolist(), full_stage)
x_diff = x2 - x1
y1 = stage_vol_df['total_vol_cu_m'][x1]
y2 = stage_vol_df['total_vol_cu_m'][x2]
y_diff = y2 - y1
slope = y_diff / x_diff
y_intercept = y2 - (slope * x2)
full_volume = (slope * full_stage) + y_intercept
print full_volume
"""
Overflow
"""
water_balance_df['overflow(cu.m)'] = 0.000
for index, row in water_balance_df.iterrows():
obs_volume = row['volume (cu.m)']
if obs_volume > full_volume:
        overflow_volume = obs_volume - full_volume
        water_balance_df['overflow(cu.m)'][index.strftime(date_format)] = overflow_volume
print water_balance_df['overflow(cu.m)'].sum()
# raise SystemExit(0)
water_balance_df = water_balance_df["2014-05-15":]
water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/overflow_check.csv')
print "overflow"
"""
Stage vs area linear relationship
"""
stage_area_df = pd.read_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_623/cont_area.csv',
sep=',', header=0, names=['sno', 'stage_m', 'total_area_sq_m'])
stage_area_df.drop('sno', inplace=True, axis=1)
# set stage as index
stage_area_df.set_index(stage_area_df['stage_m'], inplace=True)
# create empty column
water_balance_df['ws_area(sq.m)'] = 0.000
for index, row in water_balance_df.iterrows():
obs_stage = row['stage(m)'] # observed stage
if obs_stage >= stage_cutoff:
x1, x2 = cd.find_range(stage_area_df['stage_m'].tolist(), obs_stage)
x_diff = x2 - x1
y1 = stage_area_df['total_area_sq_m'][x1]
y2 = stage_area_df['total_area_sq_m'][x2]
y_diff = y2 - y1
slope = y_diff / x_diff
y_intercept = y2 - (slope * x2)
water_balance_df['ws_area(sq.m)'][index.strftime(date_format)] = (slope * obs_stage) + y_intercept
# print water_balance_df.head()
"""
Surface area to volume ratio
"""
average_stage = full_stage/2.0
x1, x2 = cd.find_range(stage_vol_df['stage_m'].tolist(), average_stage)
x_diff = x2 - x1
y1 = stage_vol_df['total_vol_cu_m'][x1]
y2 = stage_vol_df['total_vol_cu_m'][x2]
y_diff = y2 - y1
slope = y_diff / x_diff
y_intercept = y2 - (slope * x2)
average_volume = (slope*average_stage) + y_intercept
print average_stage
print average_volume
x1, x2 = cd.find_range(stage_area_df['stage_m'].tolist(), average_stage)
x_diff = x2 - x1
y1 = stage_area_df['total_area_sq_m'][x1]
y2 = stage_area_df['total_area_sq_m'][x2]
y_diff = y2 - y1
slope = y_diff / x_diff
y_intercept = y2 - (slope * x2)
average_area = (slope*average_stage) + y_intercept
print average_area
surface_area_to_vol_ratio = average_area/average_volume
print "surface area to vol ratio is %0.2f" %surface_area_to_vol_ratio
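# NOTE: the unconditional SystemExit on the next line stops the script here;
# the remaining water-balance steps below only run if it is commented out.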
raise SystemExit(0)
"""
Evaporation Volume estimation
"""
water_balance_df['Evaporation (cu.m)'] = (water_balance_df['Evaporation (mm/30min)'] * 0.001) * water_balance_df[
'ws_area(sq.m)']
"""
Daily Totals of Rain, Evaporation, Overflow
"""
sum_df = water_balance_df[['rain (mm)', 'Evaporation (cu.m)', 'Evaporation (mm/30min)', 'overflow(cu.m)']]
sum_df = sum_df.resample('D', how=np.sum)
"""
Daily average of Stage
"""
stage_df = water_balance_df[['stage(m)']]
stage_df = stage_df.resample('D', how=np.mean)
print stage_df.head()
water_balance_daily_df = sum_df.join(stage_df, how='left')
water_balance_daily_df['ws_area(sq.m)'] = 0.000
for index, row in water_balance_daily_df.iterrows():
obs_stage = row['stage(m)'] # observed stage
if obs_stage >= stage_cutoff:
x1, x2 = cd.find_range(stage_area_df['stage_m'].tolist(), obs_stage)
x_diff = x2 - x1
y1 = stage_area_df['total_area_sq_m'][x1]
y2 = stage_area_df['total_area_sq_m'][x2]
y_diff = y2 - y1
slope = y_diff / x_diff
y_intercept = y2 - (slope * x2)
water_balance_daily_df['ws_area(sq.m)'][index.strftime(date_format)] = (slope * obs_stage) + y_intercept
"""
Change in storage
"""
# water_balance_daily_df = water_balance_df
# separate out 23:30 readings
hour = water_balance_df.index.hour
minute = water_balance_df.index.minute
ch_storage_df = water_balance_df[['volume (cu.m)']][((hour == 23) & (minute == 30))]
ch_storage_df = ch_storage_df.resample('D', how=np.mean)
water_balance_daily_df['change_storage(cu.m)'] = 0.000
for index in ch_storage_df.index:
if index > min(ch_storage_df.index):
previous_date = index - timedelta(days=1)
d1_storage = ch_storage_df['volume (cu.m)'][previous_date.strftime(daily_format)]
d2_storage = ch_storage_df['volume (cu.m)'][index.strftime(daily_format)]
water_balance_daily_df['change_storage(cu.m)'][index.strftime(daily_format)] = d2_storage - d1_storage
# new_df = water_balance_daily_df.join(ch_storage_df, how='right')
# new_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_634/proof.csv')
# print water_balance_df.head()
# for index in water_balance_daily_df.index:
# if index > min(water_balance_daily_df.index):
# previous_time = index - timedelta(minutes=30)
# d1_storage = water_balance_df['volume (cu.m)'][previous_time.strftime(date_format)]
# d2_storage = water_balance_df['volume (cu.m)'][index.strftime(date_format)]
# water_balance_daily_df['change_storage(cu.m)'][index.strftime(date_format)] = d2_storage - d1_storage
print "change_Storage"
"""
Separate out no-inflow / non-rainy days
(days with rain < 2 mm, falling storage, and no overflow)
"""
water_balance_daily_df['status'] = "Y"
# no_rain_df = water_balance_daily_df[water_balance_daily_df['Rain Collection (mm)'] == 0]
# no_rain_df['status'] = "Y"
for index in water_balance_daily_df.index:
initial_time_stamp = min(water_balance_daily_df.index) + timedelta(days=1)
if index > initial_time_stamp and (water_balance_daily_df['rain (mm)'][index.strftime(daily_format)] < 2) and (
water_balance_daily_df['change_storage(cu.m)'][index.strftime(daily_format)] < 0) and (
water_balance_daily_df['overflow(cu.m)'][index.strftime(daily_format)] == 0):
water_balance_daily_df['status'][index.strftime(daily_format)] = "N"
dry_water_balance_df = water_balance_daily_df[water_balance_daily_df['status'] == "N"]
rain_water_balance_df = water_balance_daily_df[water_balance_daily_df['status'] == "Y"]
print "dry day sep"
"""
Calculate infiltration
"""
dry_water_balance_df['infiltration(cu.m)'] = 0.000
delta_s = water_balance_daily_df['change_storage(cu.m)']
evap = water_balance_daily_df['Evaporation (cu.m)']
outflow = water_balance_daily_df['overflow(cu.m)']
for index, row in dry_water_balance_df.iterrows():
if index > min(water_balance_daily_df.index):
t_1 = index - timedelta(days=1)
if t_1 < max(water_balance_daily_df.index):
dry_water_balance_df['infiltration(cu.m)'][index.strftime(daily_format)] = -1.0 * (
delta_s[index.strftime(daily_format)] + evap[index.strftime(daily_format)])
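# Water-balance rationale (consistent with the inflow calculation further down):
# Inflow - Evaporation - Infiltration - Overflow = change in storage. On the
# "dry" days selected above, inflow and overflow are taken as zero, so
# Infiltration = -(change in storage + Evaporation), which is what this loop computes.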
dry_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/dry_wb.csv')
print "infilt"
"""
Fit a power law (linear in log-log space) between stage and dry-day infiltration
"""
dry_water_balance_df['average_stage(m)'] = 0.00
stage_cal = dry_water_balance_df['stage(m)']
# stage_cal = dry_water_balance_df['average_stage_m']
inf_cal = dry_water_balance_df['infiltration(cu.m)']
fig = plt.figure()
plt.plot(stage_cal, inf_cal, 'bo')
plt.hlines(y=0, xmin=min(stage_cal), xmax=max(stage_cal))
plt.show()
# raise SystemExit(0)
# print dry_water_balance_df.shape
log_x = np.log(stage_cal)
log_y = np.log(inf_cal)
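# log_y != log_y only where the value is NaN (log of zero or negative
# infiltration), so this mask keeps the finite points for the fit below.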
OK = log_y == log_y
masked_log_y = log_y[OK]
masked_log_x = log_x[OK]
fig = plt.figure()
plt.plot(masked_log_x, masked_log_y, 'ro')
plt.show()
# raise SystemExit(0)
# pars = np.polyfit(masked_log_x, masked_log_y, 1)
slope, intercept = ccy_classic_lstsqr.ccy_classic_lstsqr(masked_log_x, masked_log_y)
print "fit done"
print slope, intercept
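# The log-log linear fit above implies a power-law relation of the form
# infiltration = exp(intercept) * stage ** slope; it is applied in that form
# to the rainy days below via math.exp((slope * log(stage)) + intercept).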
"""
Rainy day infiltration
"""
rain_water_balance_df['infiltration(cu.m)'] = 0.0
for i in rain_water_balance_df.index:
if rain_water_balance_df['stage(m)'][i.strftime(daily_format)] >= stage_cutoff:
x = rain_water_balance_df['stage(m)'][i.strftime(daily_format)]
log_infilt = (slope * np.log(x)) + intercept
rain_water_balance_df['infiltration(cu.m)'][i.strftime(daily_format)] = math.exp(log_infilt)
print "rainy day"
"""
Inflow calculation
"""
merged_water_balance = pd.concat([dry_water_balance_df, rain_water_balance_df])
merged_water_balance['Inflow (cu.m)'] = 0.000
delta_s_rain = water_balance_daily_df['change_storage(cu.m)']
inf_rain = merged_water_balance['infiltration(cu.m)']
evap_rain = water_balance_daily_df['Evaporation (cu.m)']
outflow_rain = water_balance_daily_df['overflow(cu.m)']
for i, row in merged_water_balance.iterrows():
if i > min(merged_water_balance.index):
string1 = intern(row['status'])
string2 = intern('N')
if string1 != string2:
# i_1 = i - timedelta(days=1)
merged_water_balance['Inflow (cu.m)'][i.strftime(daily_format)] = (delta_s_rain[i.strftime(daily_format)] +
inf_rain[i.strftime(daily_format)] +
evap_rain[i.strftime(daily_format)] +
outflow_rain[i.strftime(daily_format)])
merged_water_balance.sort_index(inplace=True)
wb = (merged_water_balance['Evaporation (cu.m)'].sum() +
merged_water_balance['infiltration(cu.m)'].sum() +
merged_water_balance['overflow(cu.m)'].sum()) - merged_water_balance['Inflow (cu.m)'].sum()
print "E =", merged_water_balance['Evaporation (cu.m)'].sum()
print "Infil=", merged_water_balance['infiltration(cu.m)'].sum()
print "Overflow=", merged_water_balance['overflow(cu.m)'].sum()
print "Inflow =", merged_water_balance['Inflow (cu.m)'].sum()
print "Storage=", wb
merged_water_balance.index.name = 'Date'
merged_water_balance.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_634/et_infilt_634_w_of.csv')
new_df = merged_water_balance.join(ch_storage_df, how='right')
new_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_634/proof.csv')
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex=True)
bar2 = ax2.bar(merged_water_balance.index, merged_water_balance['Inflow (cu.m)'], color='b')
bar3 = ax3.bar(merged_water_balance.index, merged_water_balance['infiltration(cu.m)'], color='r')
bar4 = ax4.bar(merged_water_balance.index, merged_water_balance['Evaporation (cu.m)'], color='g')
bar1 = ax1.plot(water_balance_df.index, water_balance_df['stage(m)'], 'ro-', label='Stage (m)' )
bar5 = ax5.bar(merged_water_balance.index, merged_water_balance['rain (mm)'], color='y')
# plt.plot(merged_water_balance['Inflow (cu.m)'],merged_water_balance['cum_rain'], 'bo')
# plt.plot(merged_water_balance.index, merged_water_balance['infiltration(cu.m)'], '-ro')
# ax4.hlines(y=0, xmin=min(merged_water_balance.index), xmax=max(merged_water_balance.index))
ax1.legend(prop={'size':16}).draggable()
lns = [bar2, bar3, bar4, bar5]
labs =['Inflow (cu.m)', "Infiltration (cu.m)", "Evaporation (cu.m)", 'Rain (mm)']
# Shrink current axis's height by 10% on the bottom
box = ax5.get_position()
ax5.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
ax5.legend(lns, labs, loc='upper center', fancybox=True, ncol=4, bbox_to_anchor=(0.5, -0.05),prop={'size':16} ).draggable()
ax1.set_title("Check dam 623 Hadonahalli")
plt.show()
| gpl-3.0 |
kilink/naarad | src/naarad/graphing/matplotlib_naarad.py | 4 | 9100 | # coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import logging
import naarad.naarad_constants as CONSTANTS
logger = logging.getLogger('naarad.graphing.matplotlib')
def convert_to_mdate(date_str):
mdate = mdates.epoch2num(int(date_str) / 1000)
return mdate
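# Illustrative example: the raw timestamps are epoch milliseconds, so
# convert_to_mdate('1438387200000') returns the matplotlib date number for
# 2015-08-01 00:00:00 UTC.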
# MPL-WA-07
# matplotlib does not rotate colors correctly when using multiple y axes. This method fills in that gap.
def get_current_color(index):
return CONSTANTS.COLOR_PALETTE[index % len(CONSTANTS.COLOR_PALETTE)]
def get_graph_metadata(plots):
height = 0
width = 0
title = ''
for plot in plots:
if plot.graph_height > height:
height = plot.graph_height
if plot.graph_width > width:
width = plot.graph_width
if title == '':
title = plot.graph_title
elif title != plot.graph_title:
title = title + ',' + plot.graph_title
return height / 80, width / 80, title
def curate_plot_list(plots):
delete_nodes = []
for plot in plots:
if os.path.exists(plot.input_csv):
if not os.path.getsize(plot.input_csv):
logger.warning("%s file is empty. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
else:
logger.warning("%s file does not exist. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
for node in delete_nodes:
plots.remove(node)
return plots
def highlight_region(plt, start_x, end_x):
"""
Highlight a region on the chart between the specified start and end x-co-ordinates.
  param pyplot plt: matplotlib pyplot which contains the charts to be highlighted
param string start_x : epoch time millis
param string end_x : epoch time millis
"""
start_x = convert_to_mdate(start_x)
end_x = convert_to_mdate(end_x)
plt.axvspan(start_x, end_x, color=CONSTANTS.HIGHLIGHT_COLOR, alpha=CONSTANTS.HIGHLIGHT_ALPHA)
def graph_data(list_of_plots, output_directory, resource_path, output_filename):
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(list_of_plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
current_axis = axis
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
timestamp, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',', converters={0: convert_to_mdate})
maximum_yvalue = numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count)
minimum_yvalue = numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count)
if current_plot_count == 0:
current_axis.yaxis.set_ticks_position('left')
if current_plot_count > 1:
current_axis = axis.twinx()
current_axis.yaxis.grid(False)
# Set right y-axis for additional plots
current_axis.yaxis.set_ticks_position('right')
# Offset the right y axis to avoid overlap
current_axis.spines['right'].set_position(('axes', 1 + CONSTANTS.Y_AXIS_OFFSET * (current_plot_count - 2)))
current_axis.spines['right'].set_smart_bounds(False)
current_axis.spines['right'].set_color(get_current_color(current_plot_count))
current_axis.set_frame_on(True)
current_axis.patch.set_visible(False)
current_axis.set_ylabel(plot.y_label, color=get_current_color(current_plot_count), fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
current_axis.set_ylim([minimum_yvalue, maximum_yvalue])
if plot.graph_type == 'line':
current_axis.plot_date(x=timestamp, y=yval, linestyle='-', marker=None, color=get_current_color(current_plot_count))
else:
current_axis.plot_date(x=timestamp, y=yval, marker='.', color=get_current_color(current_plot_count))
y_ticks = current_axis.get_yticklabels()
for y_tick in y_ticks:
y_tick.set_color(get_current_color(current_plot_count))
y_tick.set_fontsize(CONSTANTS.Y_TICKS_FONTSIZE)
for x_tick in current_axis.get_xticklabels():
x_tick.set_fontsize(CONSTANTS.X_TICKS_FONTSIZE)
if plot.highlight_regions is not None:
for region in plot.highlight_regions:
highlight_region(plt, str(region.start_timestamp), str(region.end_timestamp))
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
axis.set_xlabel('Time')
x_date_format = mdates.DateFormatter(CONSTANTS.X_TICKS_DATEFORMAT)
axis.xaxis.set_major_formatter(x_date_format)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align="center"><strong>' + os.path.basename(plot_file_name) +
'</strong></p></div><hr />')
return True, os.path.join(output_directory, output_filename + '.div')
def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename):
"""
  graph_data_on_the_same_graph: plot a list of plots on the same set of axes; currently used for CDF plots
"""
maximum_yvalue = -float('inf')
minimum_yvalue = float('inf')
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
# Generate each plot on the graph
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
xval, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',')
axis.plot(xval, yval, linestyle='-', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label)
axis.legend()
maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count))
minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count))
# Set properties of the plots
axis.yaxis.set_ticks_position('left')
axis.set_xlabel(plots[0].x_label)
axis.set_ylabel(plots[0].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
axis.set_ylim([minimum_yvalue, maximum_yvalue])
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>' + os.path.basename(plot_file_name) + '<br/></p></div>')
return True, os.path.join(output_directory, output_filename + '.div')
| apache-2.0 |
joergdietrich/astropy | ah_bootstrap.py | 31 | 36162 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]`` the presences of
that section, and options therein, determine the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason*
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
        # "EGG_DIST". When activating the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
# Just active the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the resulting Distribution object if the import succeeded, and None otherwise.
"""
# Return True on success, False on failure but download is allowed, and
# otherwise raise SystemExit
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
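        # Illustrative sample lines (hypothetical SHA-1) that the regex below
        # is intended to match, following the description above:
        #   -0123456789abcdef0123456789abcdef01234567 astropy_helpers
        #    0123456789abcdef0123456789abcdef01234567 astropy_helpers (heads/master)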
_git_submodule_status_re = re.compile(
'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
                if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
            raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
            raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
if sys.version_info[:2] < (2, 7):
# In Python 2.6 the distutils log does not log warnings, errors, etc. to
# stderr so we have to wrap it to ensure consistency at least in this
# module
import distutils
class log(object):
def __getattr__(self, attr):
return getattr(distutils.log, attr)
def warn(self, msg, *args):
self._log_to_stderr(distutils.log.WARN, msg, *args)
        def error(self, msg, *args):
            self._log_to_stderr(distutils.log.ERROR, msg, *args)
        def fatal(self, msg, *args):
            self._log_to_stderr(distutils.log.FATAL, msg, *args)
def log(self, level, msg, *args):
if level in (distutils.log.WARN, distutils.log.ERROR,
distutils.log.FATAL):
self._log_to_stderr(level, msg, *args)
else:
distutils.log.log(level, msg, *args)
def _log_to_stderr(self, level, msg, *args):
# This is the only truly 'public' way to get the current threshold
# of the log
current_threshold = distutils.log.set_threshold(distutils.log.WARN)
distutils.log.set_threshold(current_threshold)
if level >= current_threshold:
if args:
msg = msg % args
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
log = log()
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
``setup_requires`` feature of setuptools. If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
        If `True` disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
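# Illustrative example of the setup.cfg stanza described in the module
# docstring; with this in place a project's setup.py only needs to do
# ``import ah_bootstrap`` for the bootstrapping to run automatically:
#
#     [ah_bootstrap]
#     auto_use = True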
| bsd-3-clause |
samuelleblanc/flight_planning_dist | flight_planning/moving_lines_v3.py | 1 | 13162 | """
Purpose:
        Main program for the flight planning software
        Provides the interface between an excel spreadsheet
        and a clickable map
        Creates flight lines that can be saved to kml, txt, xls, and other formats.
Inputs:
test: if set to true, then command line is still useable
Outputs:
lines: linebuilder class object
        ui: special ui class to hold the gui interface objects
Dependencies:
numpy
Tkinter
datetime
map_utils
excel_interface
map_interactive
gui
Basemap
PIL (through scipy.misc.imread)
OWSLib
Required files:
labels.txt: file with labels of locations
aeronet_locations.txt: file with location of aeronet sites
arc.ico: icon for main window
Example:
...
Modification History:
Written: Samuel LeBlanc, 2015-08-07, Santa Cruz, CA
Copyright 2015 Samuel LeBlanc
Modified: Samuel LeBlanc, 2015-09-02, NASA Ames, Santa Cruz, CA
- added new buttons
- changed imports to be more specific and not import everything
Modified: Samuel LeBlanc, 2015-09-10, NASA Ames, Santa Cruz, CA
- added multi plane capabilities via radiobuttons in the gui interface
Modified: Samuel LeBlanc, 2015-09-16, NASA Ames
- added icon in main figure
- added move points buttons
- added basemap creation questions
                - added GEOS imagery via WMS service
"""
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import Tkinter as tk
import numpy as np
from mpl_toolkits.basemap import Basemap
import datetime
import scipy, scipy.misc, scipy.special, scipy.integrate
import PIL
import re, copy
import ephem
import urllib2, socket, _socket, _ssl, _elementtree
import pykml, simplekml, pyexpat
import gpxpy, gpxpy.gpx
import map_utils as mu
import excel_interface as ex
import map_interactive as mi
import gui
import tkSimpleDialog, tkFileDialog, tkMessageBox
import owslib, owslib.wms, owslib.util
from xlwings import Range, Sheet, Workbook
import Pysolar, Pysolar.solar
import win32com, win32com.client
import FileDialog
import six, six.moves
__version__ = 'v0.7beta'
def Get_basemap_profile():
'Program to load profile dict basemap values'
filename = 'profiles.txt'
defaults = Get_default_profile(filename)
select = gui.Select_profile(defaults)
return select.profile
def Get_default_profile(filename):
"""
Program to try and read a text file with the default profiles
If unavailable use some hardcoded defaults
"""
profile = []
try:
f = open(filename,'r')
dd = None
for line in f:
if not dd:
first = True
dd = line
else:
first = False
if ('{' in dd) & ('}' in dd):
profile.append(eval(dd.strip()))
dd = line.strip()
else:
if first:
dd = line.strip()
else:
dd = ''.join((dd.strip(),line.strip()))
profile.append(eval(dd.strip()))
except:
profile = [{'Profile':'ORACLES','Plane_name':'P3',
'Start_lon':'14 38.717E','Start_lat':'22 58.783S',
'Lon_range':[-20,20],'Lat_range':[-30,10],
'UTC_start':7.0,'UTC_conversion':+1.0,
'start_alt':95.0},
{'Profile':'NAAMES','Plane_name':'C130',
'Start_lon':'52 44.547W','Start_lat':'47 37.273N',
'Lon_range':[-55,-20],'Lat_range':[40,60],
'UTC_start':8.5,'UTC_conversion':-2.5,
'start_alt':110.0},
{'Profile':'KORUS-AQ','Plane_name':'DC8',
'Start_lon':'126 47.663E','Start_lat':'37 33.489N',
'Lon_range':[120,135],'Lat_range':[20,40],
'UTC_start':8.5,'UTC_conversion':+9,
'start_alt':20.0},
{'Profile':'AJAX','Plane_name':'Alpha-jet',
'Start_lon':'122 3.489W','Start_lat':'37 24.387N',
'Lon_range':[-125,-115],'Lat_range':[30,40],
'UTC_start':20.0,'UTC_conversion':+7,
'start_alt':95.0}]
return profile
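# Illustrative (assumed) layout of the optional profiles.txt parsed above: one
# python dict literal per profile, which may wrap across several lines, e.g.
#   {'Profile':'ORACLES','Plane_name':'P3',
#    'Start_lon':'14 38.717E','Start_lat':'22 58.783S',
#    'Lon_range':[-20,20],'Lat_range':[-30,10],
#    'UTC_start':7.0,'UTC_conversion':+1.0,'start_alt':95.0}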
def Create_gui(vertical=True):
'Program to set up gui interaction with figure embedded'
class ui:
pass
ui = ui
ui.root = tk.Tk()
ui.root.wm_title('Flight planning by Samuel LeBlanc, NASA Ames, '+__version__)
ui.root.geometry('900x950')
try:
ui.root.iconbitmap('arc.ico')
except:
pass
ui.top = tk.Frame(ui.root)
ui.bot = tk.Frame(ui.root)
if vertical:
ui.top.pack(side=tk.LEFT,expand=False)
ui.bot.pack(side=tk.RIGHT,fill=tk.BOTH,expand=True)
else:
ui.top.pack(side=tk.TOP,expand=False)
ui.bot.pack(side=tk.BOTTOM,fill=tk.BOTH,expand=True)
ui.fig = Figure()
ui.ax1 = ui.fig.add_subplot(111)
ui.canvas = FigureCanvasTkAgg(ui.fig,master=ui.root)
ui.canvas.show()
ui.canvas.get_tk_widget().pack(in_=ui.bot,side=tk.BOTTOM,fill=tk.BOTH,expand=1)
ui.tb = NavigationToolbar2TkAgg(ui.canvas,ui.root)
ui.tb.pack(in_=ui.bot,side=tk.BOTTOM)
ui.tb.update()
ui.canvas._tkcanvas.pack(in_=ui.bot,side=tk.TOP,fill=tk.BOTH,expand=1)
return ui
def build_buttons(ui,lines,vertical=True):
'Program to set up the buttons'
import gui
import Tkinter as tk
from matplotlib.colors import cnames
if vertical:
side = tk.TOP
h = 2
w = 20
else:
side = tk.LEFT
h = 20
w = 2
g = gui.gui(lines,root=ui.root,noplt=True)
g.refresh = tk.Button(g.root,text='Refresh',
command=g.refresh,
bg='chartreuse')
g.bopenfile = tk.Button(g.root,text='Open Excel file',
command=g.gui_open_xl)
g.bsavexl = tk.Button(g.root,text='Save Excel file',
command=g.gui_save_xl)
g.bsavetxt = tk.Button(g.root,text='Save text file',
command=g.gui_save_txt)
g.bsaveas2kml = tk.Button(g.root,text='SaveAs to Kml',
command=g.gui_saveas2kml)
g.bsave2kml = tk.Button(g.root,text='Update Kml',
command=g.gui_save2kml)
g.bsave2gpx = tk.Button(g.root,text='Save to GPX',
command=g.gui_save2gpx)
g.refresh.pack(in_=ui.top,side=side,fill=tk.X,pady=8)
tk.Label(g.root,text='File options').pack(in_=ui.top,side=side)
g.bopenfile.pack(in_=ui.top,side=side)
g.bsavexl.pack(in_=ui.top,side=side)
g.bsavetxt.pack(in_=ui.top,side=side)
g.bsaveas2kml.pack(in_=ui.top,side=side)
g.bsave2kml.pack(in_=ui.top,side=side)
g.bsave2gpx.pack(in_=ui.top,side=side)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
tk.Label(g.root,text='Other plots').pack(in_=ui.top,side=side)
g.bplotalt = tk.Button(g.root,text='Plot alt vs time',
command=g.gui_plotalttime)
g.bplotalt.pack(in_=ui.top,side=side)
g.bplotsza = tk.Button(g.root,text='Plot SZA',
command=g.gui_plotsza)
g.bplotsza.pack(in_=ui.top,side=side)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
g.frame_select = tk.Frame(g.root,relief=tk.SUNKEN,bg='white')
g.frame_select.pack(in_=ui.top,side=side,fill=tk.BOTH)
tk.Label(g.root,text='Flight paths:',bg='white').pack(in_=g.frame_select,side=side)
g.newflight_off = False
g.flightselect_arr = []
g.flightselect_arr.append(tk.Radiobutton(g.root,text=lines.ex.name,
fg=lines.ex.color,
variable=g.iactive,value=0,
indicatoron=0,
command=g.gui_changeflight,
state=tk.ACTIVE,bg='white'))
g.flightselect_arr[0].pack(in_=g.frame_select,side=side,padx=4,pady=2,fill=tk.BOTH)
g.flightselect_arr[0].select()
g.iactive.set(0)
g.newflightpath = tk.Button(g.root,text='New flight path',
command = g.gui_newflight)
g.newflightpath.pack(in_=ui.top,padx=5,pady=2)
#g.removeflightpath = tk.Button(g.root,text='Remove flight path',
# command = g.gui_removeflight)
#g.removeflightpath.pack(in_=ui.top,padx=5,pady=5)
g.addpoint = tk.Button(g.root,text='Add point dialog',
command = g.gui_addpoint)
g.addpoint.pack(in_=ui.top,padx=5,pady=2)
g.movepoints = tk.Button(g.root,text='Move points',
command = g.gui_movepoints)
g.movepoints.pack(in_=ui.top,padx=5,pady=2)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
tk.Label(g.root,text='Extra info:').pack(in_=ui.top,side=side)
g.baddsat = tk.Button(g.root,text='Add Satellite tracks',
command = g.gui_addsat_tle)
g.baddsat.pack(in_=ui.top)
g.baddbocachica = tk.Button(g.root,text='Add Forecast\nfrom Bocachica',
command = g.gui_addbocachica)
g.baddbocachica.pack(in_=ui.top)
g.baddfigure = tk.Button(g.root,text='Add Forecast\nfrom image',
command = g.gui_addfigure)
g.baddfigure.pack(in_=ui.top)
#g.baddgeos = tk.Button(g.root,text='Add GEOS',
# command = g.gui_addgeos)
#g.baddgeos.pack(in_=ui.top)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
tk.Button(g.root,text='Quit',command=g.stopandquit,bg='lightcoral'
).pack(in_=ui.top,side=side)
ui.g = g
def get_datestr(ui):
import tkSimpleDialog
from datetime import datetime
import re
ui.datestr = tkSimpleDialog.askstring('Flight Date','Flight Date (yyyy-mm-dd):')
if not ui.datestr:
ui.datestr = datetime.utcnow().strftime('%Y-%m-%d')
else:
while not re.match('[0-9]{4}-[0-9]{2}-[0-9]{2}',ui.datestr):
ui.datestr = tkSimpleDialog.askstring('Flight Date',
'Bad format, please retry!\nFlight Date (yyyy-mm-dd):')
if not ui.datestr:
ui.datestr = datetime.utcnow().strftime('%Y-%m-%d')
ui.ax1.set_title(ui.datestr)
def savetmp(ui,wb):
import tempfile, os
tmpfilename = os.path.join(tempfile.gettempdir(),ui.datestr+'.xlsx')
try:
wb.save2xl(tmpfilename)
except:
print 'unable to save excel to temp file:'+tmpfilename
print 'continuing ...'
def init_plot(m,start_lon='14 38.717E',start_lat='22 58.783S',color='red'):
lat0,lon0 = mi.pll(start_lat), mi.pll(start_lon)
x0,y0 = m(lon0,lat0)
line, = m.plot([x0],[y0],'o-',color=color,linewidth=3)
text = ('Press s to stop interaction\\n'
'Press i to restart interaction\\n')
return line
def stopandquit():
'simple function to handle the stop and quit'
lines.ex.wb.close()
ui.root.quit()
ui.root.destroy()
def Create_interaction(test=False,profile=None,**kwargs):
ui = Create_gui()
ui.tb.set_message('Creating basemap')
profile = Get_basemap_profile()
m = mi.build_basemap(ax=ui.ax1,profile=profile)
if profile:
sla,slo = profile['Start_lat'],profile['Start_lon']
else:
sla,slo = None,None
line = init_plot(m,start_lat=sla,start_lon=slo,color='red')
flabels = 'labels.txt'
faero = 'aeronet_locations.txt'
try:
ui.tb.set_message('putting labels and aeronet')
mi.plot_map_labels(m,flabels)
mi.plot_map_labels(m,faero,marker='*',skip_lines=2,color='y')
except:
print 'Label files not found!'
get_datestr(ui)
ui.tb.set_message('making the Excel connection')
wb = ex.dict_position(datestr=ui.datestr,color=line.get_color(),profile=profile,**kwargs)
ui.tb.set_message('Building the interactivity on the map')
lines = mi.LineBuilder(line,m=m,ex=wb,tb=ui.tb,blit=True)
ui.tb.set_message('Saving temporary excel file')
savetmp(ui,wb)
build_buttons(ui,lines)
lines.get_bg(redraw=True)
ui.tb.set_message('Ready for interaction')
def stopandquit():
'simple function to handle the stop and quit'
lines.ex.wb.close()
ui.root.quit()
ui.root.destroy()
ui.root.protocol('WM_DELETE_WINDOW',stopandquit)
if not test:
ui.root.mainloop()
return lines,ui
if __name__ == "__main__":
lines,ui = Create_interaction(test=False)
| gpl-3.0 |
RomainBrault/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 94 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
antoniofalcone89/webapp | plotModulo433.py | 1 | 5954 | import cgi
import cgitb; cgitb.enable()
import matplotlib
matplotlib.use( 'Agg' )
import numpy as np
from scipy.stats.kde import gaussian_kde
import os,sys
import pylab
from PIL import Image
import uuid
def crop(arg1):
    # crop to a fixed pixel box (left, top, left+width, top+height) around the plotted pitch
img = Image.open(arg1)
left = 88
top = 41
width = 545
height = 321
box = (left, top, left+width, top+height)
#area = img.crop(box)
#area.save('cropped_0_388_image1', 'jpeg')
output_img = img.crop(box)
output_img.save(arg1, 'png')
def make_fig():
global folder
folder = "433files/"
global imgFolder
imgFolder = "imgGen/"
global filename
global filename1,filename2,filename3,filename4,filename5,filename6,filename7,filename8,filename9,filename10
filename1 = folder + sys.argv[1]
filename2 = folder + sys.argv[2]
filename3 = folder + sys.argv[3]
filename4 = folder + sys.argv[4]
filename5 = folder + sys.argv[5]
filename6 = folder + sys.argv[6]
filename7 = folder + sys.argv[7]
filename8 = folder + sys.argv[8]
filename9 = folder + sys.argv[9]
filename10 = folder + sys.argv[10]
    # Team (all players)
x1, y1 = np.genfromtxt(filename1, delimiter=',', unpack=True)
x2, y2 = np.genfromtxt(filename2, delimiter=',', unpack=True)
x3, y3 = np.genfromtxt(filename3, delimiter=',', unpack=True)
x4, y4 = np.genfromtxt(filename4, delimiter=',', unpack=True)
x11, y11 = np.genfromtxt(filename5, delimiter=',', unpack=True)
x22, y22 = np.genfromtxt(filename6, delimiter=',', unpack=True)
x33, y33 = np.genfromtxt(filename7, delimiter=',', unpack=True)
x111, y111 = np.genfromtxt(filename8, delimiter=',', unpack=True)
x222, y222 = np.genfromtxt(filename9, delimiter=',', unpack=True)
x333, y333 = np.genfromtxt(filename10, delimiter=',', unpack=True)
    # Defense, from left to right
# x1, y1 = np.genfromtxt('terzinoSX.csv', delimiter=',', unpack=True)
# x2, y2 = np.genfromtxt('centraleSX.csv', delimiter=',', unpack=True)
# x3, y3 = np.genfromtxt('centraleDX.csv', delimiter=',', unpack=True)
# x4, y4 = np.genfromtxt('terzinoDX.csv', delimiter=',', unpack=True)
    # Midfield, from left to right
# x11, y11 = np.genfromtxt('centrSX.csv', delimiter=',', unpack=True)
# x22, y22 = np.genfromtxt('centrCC.csv', delimiter=',', unpack=True)
# x33, y33 = np.genfromtxt('centrDX.csv', delimiter=',', unpack=True)
    # Attack, from left to right
# x111, y111 = np.genfromtxt('alaSX.csv', delimiter=',', unpack=True)
# x222, y222 = np.genfromtxt('puntaCC.csv', delimiter=',', unpack=True)
# x333, y333 = np.genfromtxt('alaDX.csv', delimiter=',', unpack=True)
y1 = y1[np.logical_not(np.isnan(y1))]
x1 = x1[np.logical_not(np.isnan(x1))]
y2 = y2[np.logical_not(np.isnan(y2))]
x2 = x2[np.logical_not(np.isnan(x2))]
y3 = y3[np.logical_not(np.isnan(y3))]
x3 = x3[np.logical_not(np.isnan(x3))]
y4 = y4[np.logical_not(np.isnan(y4))]
x4 = x4[np.logical_not(np.isnan(x4))]
y11 = y11[np.logical_not(np.isnan(y11))]
x11 = x11[np.logical_not(np.isnan(x11))]
y22 = y22[np.logical_not(np.isnan(y22))]
x22 = x22[np.logical_not(np.isnan(x22))]
y33 = y33[np.logical_not(np.isnan(y33))]
x33 = x33[np.logical_not(np.isnan(x33))]
y111 = y111[np.logical_not(np.isnan(y111))]
x111 = x111[np.logical_not(np.isnan(x111))]
y222 = y222[np.logical_not(np.isnan(y222))]
x222 = x222[np.logical_not(np.isnan(x222))]
y333 = y333[np.logical_not(np.isnan(y333))]
x333 = x333[np.logical_not(np.isnan(x333))]
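    # Average the remaining (non-NaN) samples to obtain each player's mean position
    # on the pitch; these centroids are drawn as the red dots further below.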
xM1 = sum(x1)/len(x1)
yM1 = sum(y1)/len(y1)
xM2 = sum(x2)/len(x2)
yM2 = sum(y2)/len(y2)
xM3 = sum(x3)/len(x3)
yM3 = sum(y3)/len(y3)
xM4 = sum(x4)/len(x4)
yM4 = sum(y4)/len(y4)
xM11 = sum(x11)/len(x11)
yM11 = sum(y11)/len(y11)
xM22 = sum(x22)/len(x22)
yM22 = sum(y22)/len(y22)
xM33 = sum(x33)/len(x33)
yM33 = sum(y33)/len(y33)
xM111 = sum(x111)/len(x111)
yM111 = sum(y111)/len(y111)
xM222 = sum(x222)/len(x222)
yM222 = sum(y222)/len(y222)
xM333 = sum(x333)/len(x333)
yM333 = sum(y333)/len(y333)
fig = pylab.figure(figsize=(7,4), frameon=False)
#ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(111)
#alpha=0.5 will make the plots semitransparent
#ax1.pcolormesh(yi, xi, zi.reshape(xi.shape), alpha=0.5)
#ax2.contourf(yi, xi, zi.reshape(xi.shape), alpha=0.3)
    # red dots marking each player's mean position
ax2.plot(yM1,xM1, "ro", markersize=10)
ax2.plot(yM2,xM2, "ro", markersize=10)
ax2.plot(yM3,xM3, "ro", markersize=10)
ax2.plot(yM4,xM4, "ro", markersize=10)
ax2.plot(yM11,xM11, "ro", markersize=10)
ax2.plot(yM22,xM22, "ro", markersize=10)
ax2.plot(yM33,xM33, "ro", markersize=10)
ax2.plot(yM111,xM111, "ro", markersize=10)
ax2.plot(yM222,xM222, "ro", markersize=10)
ax2.plot(yM333,xM333, "ro", markersize=10)
pylab.axis('off')
    # connecting lines between the players of each unit (defense, midfield, attack)
pylab.plot([yM1,yM2,yM3,yM4], [xM1, xM2, xM3, xM4], 'b', linewidth=3)
pylab.plot([yM11,yM22,yM33], [xM11, xM22, xM33], 'b', linewidth=3)
pylab.plot([yM111,yM222,yM333], [xM111, xM222, xM333], 'b', linewidth=3)
#ax1.set_xlim(0, 740)
#ax1.set_ylim(515, 0)
ax2.set_xlim(0, 740)
ax2.set_ylim(515, 0)
#overlay your soccer field
im = pylab.imread('statszone_football_pitch.png')
#ax1.imshow(im, extent=[0, 740, 0, 515], aspect='auto')
ax2.imshow(im, extent=[0, 740, 0, 515], aspect='auto')
global unique_filename
unique_filename = str(uuid.uuid4())
#plt.show()
#plt.savefig('heatmaps_tackles.png')
# if(os.path.isfile('disposizione.png')):
# filename = 'disposizione1.png'
# else:
# filename = 'disposizione.png'
fig.savefig(imgFolder+unique_filename+".png")
print(unique_filename+".png")
make_fig()
crop(imgFolder+unique_filename+".png")
| mit |
deeplook/bokeh | examples/charts/file/bar.py | 37 | 2221 | from collections import OrderedDict
import numpy as np
import pandas as pd
from bokeh.charts import Bar, output_file, show, vplot, hplot
from bokeh.models import Range1d
from bokeh.sampledata.olympics2014 import data as original_data
width = 700
height = 500
legend_position = "top_right"
data = {d['abbr']: d['medals'] for d in original_data['data'] if d['medals']['total'] > 0}
countries = sorted(data.keys(), key=lambda x: data[x]['total'], reverse=True)
gold = np.array([data[abbr]['gold'] for abbr in countries], dtype=np.float)
silver = np.array([data[abbr]['silver'] for abbr in countries], dtype=np.float)
bronze = np.array([data[abbr]['bronze'] for abbr in countries], dtype=np.float)
# dict input
medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)
dict_stacked = Bar(
medals, countries, title="OrderedDict input | Stacked", legend=legend_position,
xlabel="countries", ylabel="medals", width=width, height=height,
stacked=True
)
# data frame input
df = pd.DataFrame(medals, index=countries)
df_grouped = Bar(
df, title="Data Frame input | Grouped", legend=legend_position,
xlabel="countries", ylabel="medals", width=width, height=height
)
# Numpy array input with different data to affect the ranges
random = np.random.rand(3, 8)
mixed = random - np.random.rand(3, 8)
categories = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
np_stacked = Bar(
random, cat=categories, title="Numpy Array input | Stacked",
ylabel="Random Number", xlabel="", width=width, height=height,
stacked=True
)
np_negative_grouped = Bar(
random * -1, cat=categories, title="All negative input | Grouped",
ylabel="Random Number", width=width, height=height
)
np_custom = Bar(
mixed, cat=categories, title="Custom range (start=-3, end=0.4)",
ylabel="Random Number", width=width, height=height,
continuous_range=Range1d(start=-3, end=0.4)
)
np_mixed_grouped = Bar(
mixed, cat=categories, title="Mixed-sign input | Grouped",
ylabel="Random Number", width=width, height=height
)
# collect and display
output_file("bar.html")
show(vplot(
hplot(dict_stacked, df_grouped),
hplot(np_stacked, np_negative_grouped),
hplot(np_mixed_grouped, np_custom),
))
| bsd-3-clause |
lpsinger/gst-plugins-cairovis | gst/cairovis/gencolormap.py | 1 | 3242 | # Copyright (C) 2010 Leo Singer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Auto-generate colormap_data.c, which packages Matplotlib's color data.
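Typical usage (assumed -- the script takes no command-line arguments) is to run it
directly, e.g. python gencolormap.py; it writes colormap_data.h and colormap_data.c
into the current working directory.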
"""
__author__ = "Leo Singer <leo.singer@ligo.org>"
from matplotlib.cm import datad
from inspect import isfunction
import sys
header = """/*
* Copyright (c) 2010 Leo Singer
*
* Colormap data from Matplotlib's matplotlib.cm module, which is
* Copyright (c) 2002-2009 John D. Hunter; All Rights Reserved
*
*/
"""
# Select all colormaps to print
datad_items = sorted([(key, value) for key, value in datad.items() if hasattr(value, 'iteritems') and not(isfunction(value['red']) or isfunction(value['green']) or isfunction(value['blue']))])
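# (i.e. keep only colormaps whose 'red'/'green'/'blue' channels are lists of
# (x, y0, y1) control-point tuples; channels defined by callables cannot be
# tabulated into static C arrays, so those colormaps are skipped)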
# Write header file
file = open('colormap_data.h', 'w')
try:
print >>file, header
print >>file, "#ifndef __CAIROVIS_COLORMAP_DATA_H__"
print >>file, "#define __CAIROVIS_COLORMAP_DATA_H__"
print >>file
print >>file, "enum cairovis_colormap_name"
print >>file, "{"
for key, value in datad_items:
print >>file, " CAIROVIS_COLORMAP_%s," % key
print >>file, "};"
print >>file, ""
print >>file, "#endif"
finally:
file.close()
# Write C file
file = open('colormap_data.c', 'w')
try:
print >>file, header
print >>file, """#include "colormap.h"
#include <glib.h>
#include <glib-object.h>
gboolean colormap_get_data_by_name(enum cairovis_colormap_name key, colormap_data *data)
{
switch (key)
{"""
for key, value in datad_items:
print >>file, ' case CAIROVIS_COLORMAP_%s: {' % key
for color in ('red', 'green', 'blue'):
print >>file, ' {'
print >>file, ' const double x[] = {', ','.join([repr(x) for x, y0, y1 in sorted(value[color])]), '};'
print >>file, ' const double y[] = {', ','.join([repr(y1) for x, y0, y1 in sorted(value[color])]), '};'
print >>file, ' data->%s.len = sizeof(x) / sizeof(double);' % color
print >>file, ' data->%s.x = g_memdup(x, sizeof(x));' % color
print >>file, ' data->%s.y = g_memdup(y, sizeof(y));' % color
print >>file, ' }'
print >>file, ' } return TRUE; break;'
print >>file, ' default: return FALSE;'
print >>file, " }"
print >>file, "}"
print >>file, """
GType cairovis_colormap_get_type (void)
{
static GType tp = 0;
static const GEnumValue values[] = {"""
for key, value in datad_items:
print >>file, ' {CAIROVIS_COLORMAP_%s, "%s", "%s"},' % (key, key, key)
print >>file, """ {0, NULL, NULL},
};
if (G_UNLIKELY (tp == 0)) {
tp = g_enum_register_static ("CairoVisColormap", values);
}
return tp;
}"""
finally:
file.close()
| lgpl-2.1 |
DanielPWang/Replicating-DeepMind | Hybrid/tools/scoreanalyzer.py | 12 | 1452 | """
Plot and compare game score trails
"""
import numpy as np
import matplotlib.pylab as plt
from itertools import cycle
class ScoreAnalyzer:
scores = {}
def __init__(self, files):
for filename in files:
with open(filename, 'rb') as f:
self.scores[filename] = [int(s.strip()) for s in f.readlines()]
@staticmethod
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
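    # Note: np.convolve with mode='same' zero-pads both ends of the signal, so roughly
    # the first and last box_pts/2 smoothed values are biased towards zero; plot()
    # below drops the trailing smoothing/2 points for that reason.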
def plot(self):
# set smoothing parameter
smoothing = 300
# enable to loop over different line type
lines = [":", "-", "--", "-."]
linecycler = cycle(lines)
# draw things
for filename in self.scores.keys():
plt.plot(self.smooth(self.scores[filename], smoothing)[:-smoothing/2], label=filename,
linestyle=next(linecycler))
plt.legend(loc=4)
plt.show()
def sandbox(self):
""" The main place to try out things """
print self.scores.keys()
        print self.scores[self.scores.keys()[0]]  # e.g. the first loaded score trail
if __name__ == "__main__":
analyzer = ScoreAnalyzer(['data/baseline.txt',
'data/best.txt',
'data/nesterov.txt',
'data/normalized.txt',
'data/preprocessing.txt',
'data/origgrads.txt'])
analyzer.plot()
| gpl-3.0 |
MartinDelzant/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
mmagnus/rna-pdb-tools | rna_tools/Seq.py | 1 | 27396 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""RNA Sequence with secondary structure prediction methods.
This tool takes a given sequence and returns the secondary structure prediction provided by 5 different tools: RNAfold, RNAsubopt, ipknot, contextfold and centroid_fold. You must have these tools installed. You don't have to install all tools if you want to use only one of the methods.
It's easy to add more methods of your choice to this class.
Installation
~~~~~~~~~~~~~
Depends on what tools you want to use, follow the instructions below.
ContextFold
^^^^^^^^^^^^^^^^^^^^^
https://www.cs.bgu.ac.il/~negevcb/contextfold/
needs Java. Try this on Ubuntu 14.04: https://askubuntu.com/questions/521145/how-to-install-oracle-java-on-ubuntu-14-04 Single chain only!
ViennaRNA
^^^^^^^^^^^^^^
https://www.tbi.univie.ac.at/RNA/
For OSX install from the binary Installer from the page.
ipknot OSX
^^^^^^^^^^^^^
https://github.com/satoken/homebrew-rnatools
If one encounters a problem::
[mm] Desktop$ /usr/local/opt/bin/ipknot
dyld: Library not loaded: /usr/local/opt/glpk/lib/libglpk.40.dylib
Referenced from: /usr/local/opt/bin/ipknot
Reason: image not found
[1] 51654 abort /usr/local/opt/bin/ipknot
the solution is::
brew install glpk # on OSX
RNA Structure
^^^^^^^^^^^^^
http://rna.urmc.rochester.edu/
Works with 5.8.1; Jun 16, 2016.
Download http://rna.urmc.rochester.edu/RNAstructureDownload.html and untar it in ``<RNA_PDB_TOOLS>/opt/RNAstructure/``; run make, and the tools will be compiled into the ``exe`` folder. Set ``DATAPATH`` in your bashrc to ``<RNA_PDB_TOOLS>/opt/RNAstructure/data_tables``, e.g. ``DATAPATH=/home/magnus/work/src/rna-pdb-tools/opt/RNAstructure/data_tables/`` (read more: http://rna.urmc.rochester.edu/Text/Thermodynamics.html). RNAstructure can be run with SHAPE restraints; read more about the format at http://rna.urmc.rochester.edu/Text/File_Formats.html#Constraint. The file format for SHAPE reactivity comprises two columns. The first column is the nucleotide number, and the second is the reactivity. Nucleotides for which there is no SHAPE data can either be left out of the file, or the reactivity can be entered as less than -500. Columns are separated by any white space.
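A minimal setup for the two points above (same illustrative paths)::
    export DATAPATH=<RNA_PDB_TOOLS>/opt/RNAstructure/data_tables/
and a minimal SHAPE reactivity file (nucleotide number and reactivity, whitespace
separated; the values below are only an example)::
    1 10
    2 1
    3 1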
MC-Sym
^^^^^^^^^^^^^
FAQ
~~~~~~~~~~~~~
- Does it work for more than one chain??? Hmm.. I don't think so. You have to check on your own. --magnus
TIPS
~~~~~~~~~~~~~
Should you need to run it on a list of sequences, use the following script::
from rna_tools import Seq
f = open("listOfSequences.fasta")
for line in f:
if line.startswith('>'):
print line,
else:
print line,
s = Seq.Seq(line.strip()) # module first Seq and class second Seq #without strip this has two lines
print s.predict_ss(method="contextfold"),
#print s.predict_ss(method="centroid_fold")
TODO
~~~~~~~~~~~~~
- This class should be renamed to RNASeq and merged with the RNASeq class from RNAalignment
""" # noqa
import os
import subprocess
import tempfile
import sys
from rna_tools.SecondaryStructure import parse_vienna_to_pairs
from rna_tools.rna_tools_config import CONTEXTFOLD_PATH, RNASTRUCTURE_PATH, ENTRNA_PATH, IPKNOT_PATH
class MethodNotChosen(Exception):
pass
class RNASequence(object):
"""RNASequence.
Usage::
>>> seq = RNASequence("CCCCUUUUGGGG")
>>> seq.name = 'RNA03'
>>> print(seq.predict_ss("RNAfold", constraints="((((....))))"))
>RNA03
CCCCUUUUGGGG
((((....)))) ( -6.40)
"""
def __init__(self, seq, ss='', name='rna_seq'):
self.seq = seq
self.ss = ss
self.ss_log = ''
self.name = name
def __repr__(self):
return self.name + '\n' + self.seq + '\n' + self.ss
def eval(self, ss='', no_dangling_end_energies=False, verbose=False):
"""Evaluate energy of RNA sequence.
Args:
ss (optional), if not set, then self.ss is taken for calc
no_dangling_end_energies (Boolean)
verbose (Boolean)
Returns:
Energy (float)
        The RNAeval web server calculates the energy of an RNA sequence on a given secondary structure.
You can use it to get a detailed thermodynamic description (loop free-energy decomposition) of your RNA structures.
Simply paste or upload your sequence below and click Proceed. To get more information on the meaning of the options click the help symbols. You can test the server using this sample sequence/structure pair.
An equivalent RNAeval command line call would have been::
RNAeval -v -d0 < input.txt
Read more: <http://rna.tbi.univie.ac.at//cgi-bin/RNAWebSuite/RNAeval.cgi>
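        Example call (a sketch; the exact value depends on the ViennaRNA version --
        the numbers reuse the sample shown in the parsing comment below)::
            seq = RNASequence("GGCAGGGGCGCUUCGGCCCCCUAUGCC")
            seq.eval(ss="((((((((.((....)).)))).))))")  # ~ -13.50 kcal/mol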
"""
tf = tempfile.NamedTemporaryFile(delete=False)
if not ss:
ss = self.ss
tf.name += '.fa'
with open(tf.name, 'w') as f:
f.write('>' + self.name + '\n')
f.write(self.seq + '\n')
f.write(ss + '\n')
dopt = ' -d2 '
if no_dangling_end_energies:
dopt = ' -d0 '
cmd = 'RNAeval ' + dopt + ' < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
# [u'>rna_seq\nGGCAGGGGCGCUUCGGCCCCCUAUGCC\n((((((((.((....)).)))).))))', u'(-13.50)']
return float(self.ss_log.strip().split(' ')[-1].replace('(','').replace(')', ''))
def get_foldability(self, ss='', verbose=False):
"""Calculate foldability based on EntRNA.
Steps:
- parse SS into basepairs,
        - calculate foldability
Configuration:
- Set ENTRNA_PATH to the folder where ENTRNA_predict.py is.
Cmd wrapper in here::
python ENTRNA_predict.py --seq_file pseudoknotted_seq.txt --str_file pseudoknotted_str.txt
Su, C., Weir, J. D., Zhang, F., Yan, H., & Wu, T. (2019).
ENTRNA: a framework to predict RNA foldability. BMC Bioinformatics, 20(1), 1–11.
http://doi.org/10.1186/s12859-019-2948-5
"""
if ss:
self.ss = ss
# parse SS into base-pairs
def dp_to_bp(dp):
import numpy as np
a_list = []
bp_array = np.zeros(len(dp),dtype = int)
for i in range(len(dp)):
if dp[i] == "(":
a_list.append(i)
if dp[i] == ")":
bp_array[i] = a_list[-1] + 1
bp_array[a_list[-1]] = i + 1
a_list.pop()
return list(bp_array)
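        # For example, dp_to_bp("((..))") returns [6, 5, 0, 0, 2, 1]: entry i holds the
        # 1-based index of the pairing partner of base i, or 0 if base i is unpaired.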
bp = dp_to_bp(self.ss)
if verbose: print(bp)
tempstr = tempfile.NamedTemporaryFile(delete=False)
with open(tempstr.name, 'w') as f:
f.write(str(bp))
tempseq = tempfile.NamedTemporaryFile(delete=False)
with open(tempseq.name, 'w') as f:
f.write(self.seq)
        # -W ignore silences warnings from the ENTRNA scripts, see [1] below
cmd = "cd " + ENTRNA_PATH + " && python -W ignore ENTRNA_predict.py --seq_file " + tempseq.name + " --str_file " + tempstr.name
log = subprocess.check_output(cmd, shell=True).decode()
if verbose:
print(cmd)
print(log)
for l in log.split('\n'):
if l.startswith('Foldability: '):
return round(float(l.replace('Foldability: ', '')), 2)
return -1
## [1]:
## /Users/magnus/work/evoClustRNA/rna-foldability/ENTRNA/util/pseudoknot_free.py:22: SettingWithCopyWarning:
## A value is trying to be set on a copy of a slice from a DataFrame.
## Try using .loc[row_indexer,col_indexer] = value instead
## See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
## df_v1['length'] = df_v1['seq'].apply(lambda x:len(x))
## /home/magnus/miniconda2/lib/python2.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
## FutureWarning)
## cd /Users/magnus/work/evoClustRNA/rna-foldability/ENTRNA/ && python ENTRNA_predict.py --seq_file /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmpUORegp --str_file /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmp1ERCcD
def predict_ss(self, method="RNAfold", constraints='', enforce_constraint=False, shapefn='', explore='', verbose=0):
"""Predict secondary structure of the seq.
Args:
            method (str): one of "RNAfold", "RNAfoldX", "RNAsubopt", "mcfold", "ipknot",
                "contextfold", "centroid_fold", "rnastructure" or "rnastructure_CycleFold"
            constraints (str): secondary-structure constraints in dot-bracket notation
            shapefn (str): path to a file with SHAPE reactivities
verbose (boolean)
It creates a seq fasta file and runs various methods for secondary structure
        prediction. You can also provide a constraints file for RNAfold and RNAsubopt.
        Methods that can be used with constraints: RNAsubopt, RNAfold, mcfold.
Methods that can be used with SHAPE contraints: RNAfold.
**ContextFold**
Example::
$ java -cp bin contextFold.app.Predict in:CCCCUUUGGGGG
CCCCUUUGGGGG
((((....))))
It seems that a seq has to be longer than 9. Otherwise::
$ java -cp bin contextFold.app.Predict in:UUUUUUGGG
Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: 10
# this is OK
$ java -cp bin contextFold.app.Predict in:CCCCUUUGGG
CCCCUUUGGG
.(((...)))
**RNAstructure**
Example::
>>> seq = RNASequence("GGGGUUUUCCC")
>>> print(seq.predict_ss("rnastructure"))
> ENERGY = -4.4 rna_seq
GGGGUUUUCCC
((((...))))
and with the shape data::
>>> print(seq.predict_ss("rnastructure", shapefn="data/shape.txt"))
> ENERGY = -0.2 rna_seq
GGGGUUUUCCC
.(((....)))
the shape data::
1 10
2 1
3 1
You can easily see that the first G is unpaired right now! The reactivity of this G was
set to 10. Worked!
**MC-Fold**
        MC-Fold uses the online version of the tool; this is very powerful with constraints::
rna_seq
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -53.91
(-53.91, '((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))')
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((........)))).......((((..............((((((((((..............))))))))))..))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -34.77
(-34.77, '((((........)))).......((((..............((((((((((..............))))))))))..))))')
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((xxxxxxxx))))xxxxxxx((((xxxxxxxxxxxxxx((((((((((xxxxxxxxxxxxxx))))))))))xx))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -34.77
(-34.77, '((((........)))).......((((..............((((((((((..............))))))))))..))))')
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((********))))*******((((**************((((((((((**************))))))))))**))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -77.30
(-71.12, '(((((((..))))))).......((((((.(((...)))..(((((((((((((((....)))))))))))))))))))))')
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((**[[[[[**))))*******((((****]]]]]****(((((((((((((((****)))))))))))))))**))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -77.30
('-77.30', '((((**[[[[[**))))*******((((****]]]]]****(((((((((((((((****)))))))))))))))**))))')
**explore**
The sub-optimal search space can be constrained within a percentage of the minimum free energy structure, as MC-fold makes use of the Waterman-Byers algorithm [18, 19]. Because the exploration has an exponential time complexity, increasing this value can have a dramatic effect on MC-Fold’s run time.
Parisien, M., & Major, F. (2009). RNA Modeling Using the MC-Fold and MC-Sym Pipeline [Manual] (pp. 1–84).
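        An illustrative call (a sketch only -- it needs network access to the MC-Fold
        web server; the sequence/constraints pair is the one from the class docstring)::
            seq = RNASequence("CCCCUUUUGGGG")
            seq.predict_ss("mcfold", constraints="((((....))))", explore=15)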
"""
tf = tempfile.NamedTemporaryFile(delete=False)
tf.name += '.fa'
with open(tf.name, 'w') as f:
f.write('>' + self.name + '\n')
f.write(self.seq + '\n')
if constraints:
f.write(constraints)
# check for seq and constraints
if constraints:
if len(self.seq) != len(constraints):
raise Exception('The seq and constraints should be of the same length: %i %s %i %s' % (len(self.seq), self.seq, len(constraints), constraints))
# run prediction
# rnafold without contraints
if method == "RNAfold" and constraints:
cmd = 'RNAfold -C < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.strip().split('\n')[:])
if method == "RNAfoldX" and constraints:
if enforce_constraint:
cmd = 'RNAfold -p -d2 --noLP -C --enforceConstraint < ' + tf.name
else:
cmd = 'RNAfold -p -d2 --noLP -C < ' + tf.name
if verbose:
print(cmd)
try:
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
except subprocess.CalledProcessError:
print('Error')
return 0, 'error', 0, '', 0, '', 0, 0
if verbose:
print(self.ss_log)
# parse the results
lines = self.ss_log.split('\n')
if 'Supplied structure constraints create empty solution set for sequence' in self.ss_log:
return 0, 'Supplied structure constraints create empty solution set for sequence', 0, '', 0, '', 0, 0
#if not 'frequency of mfe structure' in self.ss_log:
# RNAfold -p -d2 --noLP -C < /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmpGiUoo7.fa
# >rna_seq
# AAAUUAAGGGGAAGCGUUGAGCCGCUACCCAUAUGUGGUUCACUCGGAUAGCGGGGAGCUAAUAGUGAAACCGGCCCUUUAGGGG
# ...((((((((.(((......((((((.((....(((...)))..)).))))))...)))..............))))))))... (-19.80)
# ...{(((((((.(((......((((((.((....(((...)))..)).))))))...)))..............)))))))}... [-21.05]
#...((((((((.(((......((((((.((....(((...)))..)).))))))...)))..............))))))))... {-19.80 d=2.34}
# frequency of mfe structure in ensemble 0.131644; ensemble diversity 3.68
mfess = lines[2].split()[0]
mfe = ' '.join(lines[2].split()[-1:])
mfe = float(mfe.replace('(', '').replace(')', '')) # (-19.80) ->-19.80
            efess = lines[3].split()[0]  # ensemble (partition function) structure
efe = ' '.join(lines[3].split()[-1:])
efe = float(efe.replace('[', '').replace(']', '')) # (-19.80) ->-19.80
            cfess = lines[4].split()[0]  # centroid structure
cfe, d = ' '.join(lines[4].split()[1:]).split('d')
cfe = float(cfe.replace('{', '').replace('}', '')) # (-19.80) ->-19.80
            words = lines[5].split()  # frequency-of-mfe / ensemble-diversity line
freq = round(float(words[6].replace(';', '')), 2) # frequency of mfe structure in ensemble
diversity = float(words[9]) # ensemble diversity
if verbose:
print(mfe, mfess, efe, efess, cfe, cfess, freq, diversity)
return mfe, mfess, efe, efess, cfe, cfess, freq, diversity
elif method == "RNAfold":
cmd = 'RNAfold < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.strip().split('\n')[:])
elif method == "RNAsubopt" and constraints:
cmd = 'RNAsubopt -C < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.split('\n')[:])
elif method == "RNAsubopt":
cmd = 'RNAsubopt < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.split('\n')[:])
elif method == "mcfold":
# -F tope=1
if explore:
explore_str = " -F explore=" + str(explore)
else:
explore_str = ''
#if constraints:
#cmd = "curl -Y 0 -y 300 -F \"pass=lucy\" -F mask=\"" + constraints + "\" " + explore_str + \
#" -F sequence=\"" + self.seq + "\" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi"
cmd = "curl https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi\?pass\=lucy\&sequence\=" + self.seq + "\&top\=20\&explore\=15\&name\=\&mask\='" + constraints + "'\&singlehigh\=\&singlemed\=\&singlelow\="
# cmd = "curl -Y 0 -y 300 -F \"pass=lucy\" -F sequence=\"" + self.seq + "\" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi"
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = o.stdout.read().decode(errors='ignore').strip()
err = o.stderr.read().decode(errors='ignore').strip()
if verbose:
print(out)
            # If the structure can't be found, detect this statement and finish this routine.
if 'Explored 0 structures' in out:
return 0.00, '', 'Explored 0 structures'
comment = ''
energy = ''
out = out.split('\n')
for l in out :
# first you will find the best dynamic energy, and in the next loop
# it will be used to search for lines with this energy and secondary
# structure
# (((..))) -5.43
if energy: # if energy is set
if energy in l:
if verbose: print(l)
ss = l.split()[0]
# Performing Dynamic Programming...
# Best Dynamic Programming Solution has Energy: -5.43
if l.startswith('Best Dynamic Programming Solution has Energy:'):
energy_bdp = l.split(':')[1].strip()
if verbose:
print ('mcfold::energy best dynamics programming: ' + energy_bdp)
comment = 'energy best dynamics programming'
ss = constraints
# return float(energy), constraints # I'm not sure if this is good
            # OK, for whatever reason the best DP energy might not be exactly the same as
            # the energy listed later for the secondary structure. So this code finds that
            # secondary structure, gets the energy for it again,
            # and overwrites the previous energy.
# In this case:
# Best Dynamic Programming Solution has Energy: -5.46
# ...
# CUCUCGAAAGAUG
# (((.((..))))) -5.44 ( +0.00)
# (((.((..))))) BP >= 50%
            # if even this does not find an ss, set ss to an empty string so we do not crash;
            # this can happen, as in this example:
# curl -Y 0 -y 300 -F "pass=lucy" -F mask="((******)))" -F sequence="CCUgcgcaAGG" \
# http://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
ss = ''
for l in out:
if 'target="_blank">MARNA</a>-formatted:<P><P><P></H2><pre>' in l:
index = out.index(l)
ss_line = out[index + 2]
ss, energy = ss_line.split()[0:2] # '(((.((..))))) -5.44 ( +0.00)'
# if there is
# UUGCCGUAAGACA
# ............. BP >= 50%
# then finish with energy 0.00, and empty ss
if energy == 'BP':
energy = energy_bdp
comment = 'BP energy'
return energy_bdp, constraints, comment
# break
            # prepare outputs: the return values and the corresponding self attributes
self.log = out
self.ss = ss
return float(energy), ss, comment
# if method == "RNAsubopt":
# from cogent.app.vienna_package import RNAfold, RNAsubopt
# r = RNAsubopt(WorkingDir="/tmp")
# res = r([self.seq])
# return str(res['StdOut'].read()).strip()
# if method == 'RNAfold':
# from cogent.app.vienna_package import RNAfold, RNAsubopt
# r = RNAfold(WorkingDir="/tmp")
# res = r([self.seq])
# self.ss_log = res['StdOut'].read()
# return self.ss_log.strip().split('\n')[-1].split()[0]
elif method == "ipknot":
self.ss_log = subprocess.check_output(IPKNOT_PATH + ' ' + tf.name, shell=True)
return '\n'.join(self.ss_log.decode().split('\n')[2:])
elif method == "contextfold":
if not CONTEXTFOLD_PATH:
print('Set up CONTEXTFOLD_PATH in configuration.')
sys.exit(0)
cmd = "cd " + CONTEXTFOLD_PATH + \
" + && java -cp bin contextFold.app.Predict in:" + self.seq
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.split('\n')[1:])
elif method == "centroid_fold":
self.ss_log = subprocess.check_output('centroid_fold ' + tf.name, shell=True)
return '\n'.join(self.ss_log.split('\n')[2:])
elif method == "rnastructure":
cmd = RNASTRUCTURE_PATH + '/exe/Fold ' + tf.name + ' ' + tf.name + '.out '
if shapefn:
cmd += ' -sh ' + shapefn
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if stderr:
print(stderr)
cmd = RNASTRUCTURE_PATH + '/exe/ct2dot ' + tf.name + '.out 1 ' + \
tf.name + '.dot'
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if not stderr:
with open(tf.name + '.dot') as f:
return f.read().strip()
# (-51.15, '.(.(((((((((((((((..))))))))))))))))(..((((((((....)))).))))).')
elif method == "rnastructure_CycleFold":
cmd = RNASTRUCTURE_PATH + '/exe/CycleFold ' + tf.name + ' > ' + tf.name + '.ct '
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if stderr:
print(stderr)
# get energy
energy = float(open(tf.name + '.ct').readline().split("energy:")[1].strip()) # >rna_seq energy: -51.1500
# get ss in dot-bracket notation
cmd = RNASTRUCTURE_PATH + '/exe/ct2dot ' + tf.name + '.ct 1 ' + \
tf.name + '.dot'
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if not stderr:
with open(tf.name + '.dot') as f:
# (-51.15, '.(.(((((((((((((((..))))))))))))))))(..((((((((....)))).))))).')
return energy, f.read().strip().split('\n')[2]
else:
raise MethodNotChosen('You have to define a correct method to use.')
# main
def load_fasta_ss_into_RNAseqs(fn, debug=True):
seqs = []
with open(fn) as f:
for line in f:
if debug: print(line)
name = line.replace('>', '').strip()
seq = next(f).strip()
ss = next(f).strip()
rs = RNASequence(seq, ss, name)
seqs.append(rs)
return seqs
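# A minimal sketch of the input expected by load_fasta_ss_into_RNAseqs: the file
# is assumed to hold triplets of lines, a FASTA-style header, the sequence, and
# its secondary structure in dot-bracket notation, e.g.
#
#   >RNA01
#   CGCUUCAUAUAAUCC
#   ((((.......))))
#
# Illustrative usage (the path is hypothetical):
#   seqs = load_fasta_ss_into_RNAseqs('data/structures.fasta', debug=False)
#   print(len(seqs))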
if __name__ == '__main__':
import doctest
doctest.testmod()
seq = RNASequence("CGCUUCAUAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAGCCUUAAACUCUUGAUUAUGAAGUG")
seq.name = 'RNA01'
print(seq.predict_ss("RNAfold",
constraints="((((...............................................................))))")) # noqa
seq = RNASequence("CGCUUCAUAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAGCCUUAAACUCUUGAUUAUGAAGUG")
seq.name = 'RNA02'
print(seq.predict_ss("RNAsubopt",
constraints="((((...............................................................))))")) # noqa
print(seq.predict_ss("contextfold"))
print(seq.predict_ss(method="ipknot"))
verbose = False
seq = RNASequence("GGGGUUUUCCC")
print(seq.predict_ss("rnastructure", verbose=verbose))
print(seq.predict_ss("rnastructure", shapefn="data/shape.txt", verbose=verbose))
seq = RNASequence("CGUGGUUAGGGCCACGUUAAAUAGUUGCUUAAGCCCUAAGCGUUGAUAAAUAUCAGgUGCAA")
print(seq.predict_ss("rnastructure", shapefn="data/shape.txt", verbose=verbose))
#
    # test of MethodNotChosen
# print(seq.predict_ss("test"))
| gpl-3.0 |
rflamary/AstroImageReconsCNN | visu_filters_deconv.py | 1 | 3342 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 26 15:27:25 2016
@author: rflamary
"""
import sys
import numpy as np
import scipy as sp
import scipy.signal
import scipy.io as spio
import deconv
import matplotlib.pylab as pl
import dsutils
import theano
def get_fname(method,n,npsf,sigma,img):
return 'res/{}_{}x{}_PSF{}_sigma{:1.3f}_{}.mat'.format(method,n,n,npsf,sigma,img)
def get_fname_all(method,n,npsf,sigma):
return 'res/{}_{}x{}_PSF{}_sigma{:1.3f}_all.mat'.format(method,n,n,npsf,sigma)
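# For reference: a call such as get_fname('none', 1024, 64, 0.01, 'M51a') yields
# 'res/none_1024x1024_PSF64_sigma0.010_M51a.mat', the naming scheme assumed when
# the reconstruction results are reloaded below.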
#%% load image
I0=deconv.load_fits_image('M51a')
I0=I0/I0.max()
#%% generate
i=2
cr=32
lst_img=['M31','Hoag','M51a','M81','M101','M104']
#lst_img=['M31']
nb_img=len(lst_img)
def sel(I):
return I[300:-cr-100,300:-cr-100]
img_txt=lst_img[i]
method='none'
I0=deconv.load_fits_image(img_txt)
n=1024
iv=1200;jv=1200
I0=I0[iv:iv+n,jv:jv+n]
npsf=64
sigma=0.01
fname=get_fname('none',n,npsf,sigma,img_txt)
data=spio.loadmat(fname)
Inoise=data['Irec']
#%% get PSF
npsf=64
nr=5
#%% deconvnn
fname='models/32x32_10x10-64_6x6-16_5x5-1_PSF64_sigma0.010_M51a'
model=dsutils.load_model(fname)
model.compile(optimizer='SGD', loss='mse')
sz=32
szp=14
deconv.tic()
I_dcnnn=dsutils.apply_model(Inoise,model,sz,szp)
deconv.toc()
#%% visu last layer
szp2=18
Ip=dsutils.im2patch(Inoise,sz,szp2)
convout1_f = theano.function([model.get_input_at(0)], model.layers[1].get_output_at(0),allow_input_downcast=True)
Ip2=convout1_f(Ip)
I_layer1=dsutils.patch2im(Ip2[:,0:1,:,:],Inoise.shape,sz,szp2)
#
#pl.figure("last layer")
#
#for i in range(16):
# pl.subplot(4,4,i+1)
# pl.imshow(dsutils.patch2im(Ip2[:,i:i+1,:,:],Inoise.shape,sz,szp2),interpolation='nearest')
Il2_1=dsutils.patch2im(Ip2[:,15:16,:,:],Inoise.shape,sz,szp2)
Il2_2=dsutils.patch2im(Ip2[:,2:3,:,:],Inoise.shape,sz,szp2)
Il2_3=dsutils.patch2im(Ip2[:,7:8,:,:],Inoise.shape,sz,szp2)
#%% visu first layer
szp2=23
Ip=dsutils.im2patch(Inoise,sz,szp2)
convout0_f = theano.function([model.get_input_at(0)], model.layers[0].get_output_at(0),allow_input_downcast=True)
Ip2=convout0_f(Ip)
I_layer1=dsutils.patch2im(Ip2[:,0:1,:,:],Inoise.shape,sz,szp2)
#pl.figure("first layer")
#
#for i in range(64):
# pl.subplot(8,8,i+1)
# pl.imshow(dsutils.patch2im(Ip2[:,i:i+1,:,:],Inoise.shape,sz,szp2),interpolation='nearest')
Il1_1=dsutils.patch2im(Ip2[:,4:5,:,:],Inoise.shape,sz,szp2)
Il1_2=dsutils.patch2im(Ip2[:,2:3,:,:],Inoise.shape,sz,szp2)
Il1_3=dsutils.patch2im(Ip2[:,33:34,:,:],Inoise.shape,sz,szp2)
#%%
yt=1
fs=10
pl.figure(1)
pl.subplot(3,3,1)
pl.imshow(sel(Il1_1),cmap='gray')
pl.title('Layer 1 output 1',fontsize=fs,y=yt)
pl.axis("off")
pl.subplot(3,3,2)
pl.imshow(sel(Il1_2),cmap='gray')
pl.title('Layer 1 output 2',fontsize=fs,y=yt)
pl.axis("off")
pl.subplot(3,3,3)
pl.imshow(sel(Il1_3),cmap='gray')
pl.title('Layer 1 output 3',fontsize=fs,y=yt)
pl.axis("off")
pl.subplot(3,3,4)
pl.imshow(sel(Il2_1),cmap='gray')
pl.title('Layer 2 output 1',fontsize=fs,y=yt)
pl.axis("off")
pl.subplot(3,3,5)
pl.imshow(sel(Il2_2),cmap='gray')
pl.title('Layer 2 output 2',fontsize=fs,y=yt)
pl.axis("off")
pl.subplot(3,3,6)
pl.imshow(sel(Il2_3),cmap='gray')
pl.title('Layer 2 output 3',fontsize=fs,y=yt)
pl.axis("off")
pl.subplots_adjust(wspace=-.5,hspace=0.3)
pl.savefig('imgs/images_features.png',dpi=300,bbox_inches='tight',pad_inches=.01)
| mit |
heli522/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 126 | 13591 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset size is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
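# Illustrative sketch (not part of the original module): the header block is
# everything before the first blank line, so for a post such as
# "From: a@b.org\nSubject: hi\n\nbody text" the function returns "body text".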
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
ifuding/Kaggle | SVPC/Code/philly/nfold_train.py | 1 | 8190 | from sklearn.model_selection import KFold
from lgb import lgbm_train
# import xgboost as xgb
# from functools import reduce
import numpy as np
from keras_train import DNN_Model, VAE_Model
import keras_train
# import gensim
# from RCNN_Keras import get_word2vec, RCNN_Model
# from RNN_Keras import RNN_Model
from tensorflow.python.keras.models import Model
# from xgb import xgb_train
import pandas as pd
from sklearn import metrics
# RNN_PARAMS
RCNN_HIDDEN_UNIT = [128, 64]
def cal_leak_blend_loss(leak_train, model, valide_data, valide_label):
# print (leak_train.head)
# leak_train = leak_train.copy()
pred = pd.Series(model_eval(model[0], model[1], valide_data), index = valide_data.index)
# leak_valide_part = leak_train.loc[valide_data.index]
# print(leak_valide_part.shape)
# print ('valide_data: ', valide_data.head())
blend_leak_target = leak_train.loc[valide_data.index]
# loss = np.sqrt(metrics.mean_squared_error(valide_label, blend_leak_target.values))
# print ('before blend leak_blend_loss: ', loss)
blend_leak_target[blend_leak_target == 0] = pred[blend_leak_target == 0]
# print(blend_leak_target[blend_leak_target == np.nan])
loss = np.sqrt(metrics.mean_squared_error(valide_label, pred.values))
blend_leak_loss = np.sqrt(metrics.mean_squared_error(valide_label, blend_leak_target.values))
print ('loss: ', loss, 'leak_blend_loss: ', blend_leak_loss)
return loss, blend_leak_loss
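# Small made-up illustration of the blending rule above: if the leak-derived
# targets for the validation rows are [0, 3.2, 0] and the model predicts
# [1.1, 2.9, 0.7], the blended vector becomes [1.1, 3.2, 0.7]; non-zero leak
# values are kept and the model fills the gaps. Both RMSEs are then computed
# against the true validation labels.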
def nfold_train(train_data, train_label, model_types = None,
stacking = False, valide_data = None, valide_label = None,
test_data = None, train_weight = None, valide_weight = None,
flags = None ,tokenizer = None, scores = None, emb_weight = None, cat_max = None, leak_target = None):
"""
nfold Training
"""
print("Over all training size:")
print(train_data.shape)
print("Over all label size:")
print(train_label.shape)
fold = flags.nfold
kf = KFold(n_splits=fold, shuffle=False)
# wv_model = gensim.models.Word2Vec.load("wv_model_norm.gensim")
stacking = flags.stacking
stacking_data = None
stacking_label = None
test_preds = None
num_fold = 0
models = []
losses = []
leak_train = leak_target
for train_index, test_index in kf.split(train_data):
# print(test_index[:100])
# exit(0)
if valide_label is None:
train_part = train_data.iloc[train_index]
train_part_label = None
if model_types[0] != 'v' and model_types[0] != 'r':
train_part_label = train_label[train_index]
valide_part = train_data.iloc[test_index]
valide_part_label = None
if model_types[0] != 'v' and model_types[0] != 'r':
valide_part_label = train_label[test_index]
if train_weight is not None:
train_part_weight = train_weight[train_index]
valide_part_weight = train_weight[test_index]
else:
train_part = train_data
train_part_label = train_label
valide_part = valide_data
valide_part_label = valide_label
if train_weight is not None:
train_part_weight, valide_part_weight = train_weight, valide_weight
print('fold: %d th train :-)' % (num_fold))
print('Train size: {} Valide size: {}'.format(train_part.shape[0], valide_part.shape[0]))
onefold_models = []
for model_type in model_types:
if model_type == 'k' or model_type == 'r':
# with tf.device('/cpu:0'):
model = DNN_Model(scores = scores, cat_max = cat_max, flags = flags, emb_weight = emb_weight, model_type = model_type)
if num_fold == 0:
print(model.model.summary())
model.train(train_part, train_part_label, valide_part, valide_part_label)
# if stacking:
# model = Model(inputs = model.model.inputs, outputs = model.model.get_layer(name = 'merge_sparse_emb').output)
onefold_models.append((model, model_type))
elif model_type == 'v':
# with tf.device('/cpu:0'):
model = VAE_Model(flags = flags)
if num_fold == 0:
print(model.model.summary())
model.train(train_part, train_part_label, valide_part, valide_part_label)
model = Model(inputs = model.model.inputs, outputs = model.model.get_layer(name = 'z').output)
# if stacking:
# model = Model(inputs = model.model.inputs, outputs = model.model.get_layer(name = 'merge_sparse_emb').output)
onefold_models.append((model, 'v'))
stacking_data = model_eval(model, 'v', train_data) # for model in onefold_models]
# stacking_data = reduce((lambda x, y: np.c_[x, y]), stacking_data)
print('stacking_data shape: {0}'.format(stacking_data.shape))
elif model_type == 'x':
model = xgb_train(train_part, train_part_label, valide_part, valide_part_label, num_fold)
onefold_models.append((model, 'x'))
elif model_type == 'l':
model = lgbm_train(train_part, train_part_label, valide_part, valide_part_label, num_fold,
fold, flags = flags)
onefold_models.append((model, 'l'))
# print (leak_train.head)
losses.append(cal_leak_blend_loss(leak_train, onefold_models[0], valide_part, valide_part_label))
# if stacking:
# valide_pred = [model_eval(model[0], model[1], valide_part) for model in onefold_models]
# valide_pred = reduce((lambda x, y: np.c_[x, y]), valide_pred)
# test_pred = [model_eval(model[0], model[1], test_data) for model in onefold_models]
# test_pred = reduce((lambda x, y: np.c_[x, y]), test_pred)
# if stacking_data is None:
# stacking_data = valide_pred #np.c_[valide_part, valide_pred]
# stacking_label = valide_part_label
# test_preds = test_pred
# else:
# stacking_data = np.append(stacking_data, valide_pred, axis = 0) #np.append(stacking_data, np.c_[valide_part, valide_pred], axis = 0)
# stacking_label = np.append(stacking_label, valide_part_label, axis = 0)
# test_preds += test_pred
# print('stacking_data shape: {0}'.format(stacking_data.shape))
# print('stacking_label shape: {0}'.format(stacking_label.shape))
# print('stacking test data shape: {0}'.format(test_preds.shape))
models.append(onefold_models[0])
num_fold += 1
if num_fold == flags.ensemble_nfold:
break
mean_loss = np.array(losses).mean(axis = 0)
print ('Mean loss: ', mean_loss[0], "Mean blend loss: ", mean_loss[1])
# if stacking:
# test_preds /= flags.ensemble_nfold
# test_data = np.c_[test_data, test_preds]
return models, stacking_data, stacking_label, test_preds
def model_eval(model, model_type, data_frame):
"""
"""
if model_type == 'l':
preds = model.predict(data_frame[keras_train.USED_FEATURE_LIST].values, num_iteration=model.best_iteration)
elif model_type == 'k' or model_type == 'LR' or model_type == 'DNN' or model_type == 'rcnn' \
or model_type == 'r' or model_type == 'cnn':
preds = model.predict(data_frame, verbose = 2)
elif model_type == 'v':
preds = model.predict(data_frame[keras_train.USED_FEATURE_LIST].values, verbose = 2)
elif model_type == 't':
print("ToDO")
elif model_type == 'x':
preds = model.predict(xgb.DMatrix(data_frame), ntree_limit=model.best_ntree_limit)
return preds.reshape((data_frame.shape[0], ))
def models_eval(models, data):
preds = None
for (model, model_type) in models:
pred = model_eval(model, model_type, data)
if preds is None:
preds = pred.copy()
else:
preds += pred
preds /= len(models)
    return preds
| apache-2.0 |
icoxfog417/pystan | pystan/external/pymc/plots.py | 5 | 13334 | # pymc git commit: 6115726122d46267c86d16de635941daa37eb357
# =======
# License
# =======
#
# PyMC is distributed under the Apache License, Version 2.0
#
# Copyright (c) 2006 Christopher J. Fonnesbeck (Academic Free License)
# Copyright (c) 2007-2008 Christopher J. Fonnesbeck, Anand Prabhakar Patil, David Huard (Academic Free License)
# Copyright (c) 2009-2013 The PyMC developers (see contributors to pymc-devs on GitHub)
# All rights reserved.
from pylab import *
try:
import matplotlib.gridspec as gridspec
except ImportError:
gridspec = None
import numpy as np
from scipy.stats import kde
from .stats import *
from .trace import *
__all__ = ['traceplot', 'kdeplot', 'kde2plot', 'forestplot', 'autocorrplot']
def traceplot(trace, vars=None):
if vars is None:
vars = trace.varnames
if isinstance(trace, MultiTrace):
trace = trace.combined()
n = len(vars)
f, ax = subplots(n, 2, squeeze=False)
for i, v in enumerate(vars):
d = np.squeeze(trace[v])
if trace[v].dtype.kind == 'i':
            ax[i, 0].hist(d, bins=int(sqrt(d.size)))
else:
kdeplot_op(ax[i, 0], d)
ax[i, 0].set_title(str(v))
ax[i, 1].plot(d, alpha=.35)
ax[i, 0].set_ylabel("frequency")
ax[i, 1].set_ylabel("sample value")
return f
def kdeplot_op(ax, data):
data = np.atleast_2d(data.T).T
for i in range(data.shape[1]):
d = data[:, i]
density = kde.gaussian_kde(d)
l = np.min(d)
u = np.max(d)
x = np.linspace(0, 1, 100) * (u - l) + l
ax.plot(x, density(x))
def kde2plot_op(ax, x, y, grid=200):
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
grid = grid * 1j
X, Y = np.mgrid[xmin:xmax:grid, ymin:ymax:grid]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = kde.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.imshow(np.rot90(Z), cmap=cm.gist_earth_r,
extent=[xmin, xmax, ymin, ymax])
def kdeplot(data):
f, ax = subplots(1, 1, squeeze=True)
kdeplot_op(ax, data)
return f
def kde2plot(x, y, grid=200):
f, ax = subplots(1, 1, squeeze=True)
kde2plot_op(ax, x, y, grid)
return f
def autocorrplot(trace, vars=None, fontmap = None, max_lag=100):
"""Bar plot of the autocorrelation function for a trace"""
try:
# MultiTrace
traces = trace.traces
except AttributeError:
# NpTrace
traces = [trace]
if fontmap is None: fontmap = {1:10, 2:8, 3:6, 4:5, 5:4}
if vars is None:
vars = traces[0].varnames
# Extract sample data
samples = [{v:trace[v] for v in vars} for trace in traces]
chains = len(traces)
n = len(samples[0])
f, ax = subplots(n, chains, squeeze=False)
max_lag = min(len(samples[0][vars[0]])-1, max_lag)
for i, v in enumerate(vars):
for j in xrange(chains):
d = np.squeeze(samples[j][v])
ax[i,j].acorr(d, detrend=mlab.detrend_mean, maxlags=max_lag)
if not j:
ax[i, j].set_ylabel("correlation")
ax[i, j].set_xlabel("lag")
if chains > 1:
ax[i, j].set_title("chain {0}".format(j+1))
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[1])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[1])
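# Minimal usage sketch, assuming `trace` is an NpTrace or MultiTrace produced by
# pymc sampling (the variable name 'theta' is only illustrative):
#
#   autocorrplot(trace, vars=['theta'], max_lag=50)
#
# One row of bar plots is drawn per variable and one column per chain.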
def var_str(name, shape):
"""Return a sequence of strings naming the element of the tallyable object.
This is a support function for forestplot.
:Example:
>>> var_str('theta', (4,))
['theta[1]', 'theta[2]', 'theta[3]', 'theta[4]']
"""
size = prod(shape)
ind = (indices(shape) + 1).reshape(-1, size)
names = ['[' + ','.join(map(str, i)) + ']' for i in zip(*ind)]
# if len(name)>12:
# name = '\n'.join(name.split('_'))
# name += '\n'
names[0] = '%s %s' % (name, names[0])
return names
def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
main=None, xtitle=None, xrange=None, ylabels=None, chain_spacing=0.05, vline=0):
""" Forest plot (model summary plot)
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either the
set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
vars: list
List of variables to plot (defaults to None, which results in all
variables plotted).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to 0.05).
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
suppressed; passing None (default) results in default titles.
xtitle (optional): string
Label for x-axis. Defaults to no label
xrange (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
ylabels (optional): list
User-defined labels for each variable. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline (optional): numeric
Location of vertical reference line (defaults to 0).
"""
if not gridspec:
print_(
'\nYour installation of matplotlib is not recent enough to support summary_plot; this function is disabled until matplotlib is updated.')
return
# Quantiles to be calculated
qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
if quartiles:
qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
# Range for x-axis
plotrange = None
# Number of chains
chains = None
# Gridspec
gs = None
# Subplots
interval_plot = None
rhat_plot = None
try:
# First try MultiTrace type
traces = trace_obj.traces
if rhat and len(traces) > 1:
from .diagnostics import gelman_rubin
R = gelman_rubin(trace_obj)
if vars is not None:
R = {v: R[v] for v in vars}
else:
rhat = False
except AttributeError:
# Single NpTrace
traces = [trace_obj]
# Can't calculate Gelman-Rubin with a single trace
rhat = False
if vars is None:
vars = traces[0].varnames
# Empty list for y-axis labels
labels = []
chains = len(traces)
if gs is None:
# Initialize plot
if rhat and chains > 1:
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
else:
gs = gridspec.GridSpec(1, 1)
# Subplot for confidence intervals
interval_plot = subplot(gs[0])
for j, tr in enumerate(traces):
# Get quantiles
trace_quantiles = quantiles(tr, qlist)
hpd_intervals = hpd(tr, alpha)
# Counter for current variable
var = 1
for varname in vars:
var_quantiles = trace_quantiles[varname]
quants = var_quantiles.values()
var_hpd = hpd_intervals[varname].T
# Substitute HPD interval for quantile
quants[0] = var_hpd[0].T
quants[-1] = var_hpd[1].T
# Ensure x-axis contains range of current interval
if plotrange:
plotrange = [min(
plotrange[0],
np.min(quants)),
max(plotrange[1],
np.max(quants))]
else:
plotrange = [np.min(quants), np.max(quants)]
# Number of elements in current variable
value = tr[varname][0]
k = np.size(value)
# Append variable name(s) to list
if not j:
if k > 1:
names = var_str(varname, shape(value))
labels += names
else:
labels.append(varname)
# labels.append('\n'.join(varname.split('_')))
# Add spacing for each chain, if more than one
e = [0] + [(chain_spacing * ((i + 2) / 2)) *
(-1) ** i for i in range(chains - 1)]
# Deal with multivariate nodes
if k > 1:
for i, q in enumerate(np.transpose(quants).squeeze()):
# Y coordinate with jitter
y = -(var + i) + e[j]
if quartiles:
# Plot median
plot(q[2], y, 'bo', markersize=4)
# Plot quartile interval
errorbar(
x=(q[1],
q[3]),
y=(y,
y),
linewidth=2,
color="blue")
else:
# Plot median
plot(q[1], y, 'bo', markersize=4)
# Plot outer interval
errorbar(
x=(q[0],
q[-1]),
y=(y,
y),
linewidth=1,
color="blue")
else:
# Y coordinate with jitter
y = -var + e[j]
if quartiles:
# Plot median
plot(quants[2], y, 'bo', markersize=4)
# Plot quartile interval
errorbar(
x=(quants[1],
quants[3]),
y=(y,
y),
linewidth=2,
color="blue")
else:
# Plot median
plot(quants[1], y, 'bo', markersize=4)
# Plot outer interval
errorbar(
x=(quants[0],
quants[-1]),
y=(y,
y),
linewidth=1,
color="blue")
# Increment index
var += k
labels = ylabels or labels
# Update margins
left_margin = np.max([len(x) for x in labels]) * 0.015
gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
# Define range of y-axis
ylim(-var + 0.5, -0.5)
datarange = plotrange[1] - plotrange[0]
xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
# Add variable labels
yticks([-(l + 1) for l in range(len(labels))], labels)
# Add title
if main is not False:
plot_title = main or str(int((
1 - alpha) * 100)) + "% Credible Intervals"
title(plot_title)
# Add x-axis label
if xtitle is not None:
xlabel(xtitle)
# Constrain to specified range
if xrange is not None:
xlim(*xrange)
# Remove ticklines on y-axes
for ticks in interval_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in interval_plot.spines.iteritems():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
# Reference line
axvline(vline, color='k', linestyle='--')
    # Generate Gelman-Rubin plot
if rhat and chains > 1:
# If there are multiple chains, calculate R-hat
rhat_plot = subplot(gs[1])
if main is not False:
title("R-hat")
# Set x range
xlim(0.9, 2.1)
# X axis labels
xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
yticks([-(l + 1) for l in range(len(labels))], "")
i = 1
for varname in vars:
value = traces[0][varname][0]
k = np.size(value)
if k > 1:
plot([min(r, 2) for r in R[varname]], [-(j + i)
for j in range(k)], 'bo', markersize=4)
else:
plot(min(R[varname], 2), -i, 'bo', markersize=4)
i += k
# Define range of y-axis
ylim(-i + 0.5, -0.5)
# Remove ticklines on y-axes
for ticks in rhat_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in rhat_plot.spines.iteritems():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
return gs
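# Example call (a sketch; the variable names are illustrative):
#
#   forestplot(trace, vars=['mu', 'tau'], alpha=0.05, main='Posterior intervals')
#
# draws the credible-interval panel and, when the trace holds two or more
# chains, an R-hat panel beside it.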
| gpl-3.0 |
willettk/rgz-analysis | python/analyze_consensus.py | 2 | 3995 | import consensus
import bending_angles as ba
import rgz
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
path = '/Users/willettk/Astronomy/Research/GalaxyZoo'
def load_rgz_data():
client = rgz.MongoClient('localhost', 27017)
db = client['radio']
subjects = db['radio_subjects']
classifications = db['radio_classifications']
catalog = db['catalog']
return subjects,classifications,catalog
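# Example session (a sketch; it assumes the RGZ MongoDB dump is loaded locally
# on the default port, as load_rgz_data expects):
#
#   subjects, classifications, catalog = load_rgz_data()
#   consensus_hist(catalog, savefig=True)
#   edge_down(catalog)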
# Analyze the results of the consensus algorithm and catalog aggregation
def consensus_hist(catalog,savefig=False):
# Make histogram of the consensus level for sources
df = pd.read_csv('%s/rgz-analysis/csv/static_catalog_full.csv' % path,delim_whitespace=True)
fig = plt.figure(figsize=(15,8))
all_entries = catalog.find()
consensus_level = np.array([])
n_components = []
for a in all_entries:
consensus_level = np.append(consensus_level,a['consensus']['level'])
n_components = np.append(n_components,a['radio']['numberComponents'])
ax1 = fig.add_subplot(121)
ax1.hist(consensus_level,bins=20)
ax1.set_yscale('log')
ax1.set_xlabel('Consensus level',fontsize=16)
ax1.set_ylabel('Count',fontsize=16)
ax1.set_title('All RGZ completed subjects')
ax2 = fig.add_subplot(122)
for i in np.arange(4):
ax2.hist(consensus_level[n_components == i+1], bins=20, alpha=0.5, label=r'$N=$%i' % (i+1))
ax2.hist(consensus_level[n_components >= 5], bins=20, alpha=0.5, label=r'$N\geq$%i' % 5)
ax2.set_yscale('log')
ax2.set_xlabel('Consensus level',fontsize=16)
ax2.set_ylabel('Count',fontsize=16)
ax2.set_title('Grouped by number of radio components')
ax2.legend(loc='upper left')
'''
ax1 = fig.add_subplot(121)
df['consensus_level'].hist(kind='hist',ax=ax1,bins=20)
ax2 = fig.add_subplot(122)
df['consensus_level'].hist(kind='hist',by=df['ax=ax2,bins=20)
'''
if savefig:
fig.savefig('%s/rgz-analysis/plots/analyze_consensus1.pdf' % path)
else:
plt.show()
return None
def edge_down(catalog,savefig=False):
# Make histogram of the consensus level for sources
df = pd.read_csv('%s/rgz-analysis/csv/static_catalog_full.csv' % path,delim_whitespace=True)
fig = plt.figure(figsize=(15,8))
all_entries = catalog.find()
consensus_level = np.array([])
n_components = []
for a in all_entries:
consensus_level = np.append(consensus_level,a['consensus']['level'])
n_components = np.append(n_components,a['radio']['numberComponents'])
ax1 = fig.add_subplot(121)
n,bins,patches = ax1.hist(consensus_level,bins=20,cumulative=True,histtype='step')
ax1.set_yscale('log')
for value in (0.50,0.75):
ax1.axvline(value,ls='--',color='k')
ax1.text(value,ax1.get_ylim()[1]*0.95,int(n[(np.abs(bins-value)).argmin()]),fontsize=10,va='top')
ax1.set_xlabel('Consensus level',fontsize=16)
ax1.set_ylabel('Count',fontsize=16)
ax1.set_title('All RGZ completed subjects')
ax2 = fig.add_subplot(122)
for i in np.arange(4):
n,bins,patches = ax2.hist(consensus_level[n_components == i+1], bins=20, alpha=0.5, label=r'$N=$%i' % (i+1),cumulative=True,histtype='step')
for value in (0.50,0.75):
print i+1,value,int(n[(np.abs(bins-value)).argmin()-1])
n,bins,patches = ax2.hist(consensus_level[n_components >= 5], bins=20, alpha=0.5, label=r'$N\geq$%i' % 5,cumulative=True,histtype='step')
for value in (0.50,0.75):
ax2.axvline(value,ls='--',color='k')
print "5+",value,int(n[(np.abs(bins-value)).argmin()-1])
ax2.set_yscale('log')
ax2.set_xlabel('Consensus level',fontsize=16)
ax2.set_ylabel('Count',fontsize=16)
ax2.set_title('Grouped by number of radio components')
ax2.legend(loc='upper left')
if savefig:
fig.savefig('%s/rgz-analysis/plots/analyze_consensus2.pdf' % path)
else:
plt.show()
return None
| mit |
0x0all/scikit-learn | sklearn/mixture/gmm.py | 10 | 26777 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.extmath import logsumexp, pinvh
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
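# Shape example (a sketch, not part of the scikit-learn docs): for 10 samples in
# 2 dimensions and 3 diagonal components the returned array has shape (10, 3).
#
#   X = np.random.randn(10, 2)
#   means = np.zeros((3, 2))
#   covars = np.ones((3, 2))
#   lpr = log_multivariate_normal_density(X, means, covars, 'diag')  # (10, 3)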
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covars : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
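# Usage sketch: draw 5 samples from a 2-D spherical Gaussian. The samples come
# back with shape (n_features, n_samples), i.e. (2, 5) here.
#
#   samples = sample_gaussian(np.zeros(2), 1.0, 'spherical', n_samples=5,
#                             random_state=0)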
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=1e-2, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on `cvtype`::
(`n_states`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_states`, `n_features`) if 'diag',
(`n_states`, `n_features`, `n_features`) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X):
"""Estimate model parameters with the expectation-maximization
algorithm.
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
## initialization step
X = np.asarray(X, dtype=np.float)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
log_likelihood = []
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
# Expectation step
curr_log_likelihood, responsibilities = self.score_samples(X)
log_likelihood.append(curr_log_likelihood.sum())
# Check for convergence.
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if log_likelihood[-1] > max_log_prob:
max_log_prob = log_likelihood[-1]
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
        # Check that at least one run produced a finite likelihood; otherwise
        # the initial parameters led to a degenerate likelihood computation.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
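    # Worked example for the count above (illustrative only): with 'diag'
    # covariances, n_components=2 and 3 features there are 2*3 covariance terms,
    # 2*3 mean terms and 2-1 free weights, i.e. 6 + 6 + 1 = 13 parameters, the
    # value consumed by bic() and aic() below.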
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
n_samples, n_dim = X.shape
icv = pinvh(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
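# Hedged usage sketch (editor's addition, not in the original file): starting
# from a single tied template covariance, the helper above yields per-component
# covariances with the shape each covariance_type expects.  The demo function
# name is hypothetical and is never called at import time.
def _demo_distribute_covar_shapes():
    import numpy as np
    tied_cv = np.eye(3)
    shapes = {}
    for ct in ('spherical', 'tied', 'diag', 'full'):
        cv = distribute_covar_matrix_to_match_covariance_type(tied_cv, ct, 2)
        shapes[ct] = np.asarray(cv).shape
    # expected: spherical (2, 3), tied (3, 3), diag (2, 3), full (2, 3, 3)
    return shapes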
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for tied cases"""
    # Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
kjung/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 46 | 18585 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# Licence: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false)
np.seterr(all='warn')
ACTIVATION_TYPES = ["logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
def test_alpha():
    # Test that larger alpha yields weights closer to zero.
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
    # Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(algorithm='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.classes_ = [0, 1]
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
mlp.label_binarizer_.y_type_ = 'binary'
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
assert_almost_equal(mlp.decision_function(X), 1.043, decimal=3)
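# Hedged sketch (editor's addition, not one of the original tests): the first
# hand-computed forward pass in the comments of test_fit (before the parameter
# update) can be reproduced directly with plain numpy, using the same initial
# weights.  The helper is illustrative only and, lacking a "test_" prefix, is
# not collected by the test runner.
def _demo_forward_pass():
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))
    x = np.array([0.6, 0.8, 0.7])
    W1 = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0.0]])
    b1 = np.array([0.1, 0.1])
    W2 = np.array([[0.1], [0.2]])
    b2 = np.array([1.0])
    h = sigmoid(x.dot(W1) + b1)   # ~[0.679, 0.574], as in the comments above
    o = sigmoid(h.dot(W2) + b2)   # ~0.765, matching the worked example
    return h, o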
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
algorithm='l-bfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
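# Hedged aside (editor's addition): test_gradient relies on the central
# difference estimate f'(t) ~ (f(t + eps) - f(t - eps)) / (2 * eps).  The
# illustrative helper below checks that estimate on a simple scalar function;
# it is not part of the original test suite and is never collected.
def _demo_central_difference():
    t, eps = 0.3, 1e-5
    numeric = (np.sin(t + eps) - np.sin(t - eps)) / (2 * eps)
    assert abs(numeric - np.cos(t)) < 1e-8
    return numeric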
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solution.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
    # Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
    # Test that multi-output regression works as expected.
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
    # Tests that passing different classes to partial_fit raises an error.
X = [[3, 2]]
y = [0]
clf = MLPClassifier(algorithm='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
    # `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(algorithm='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(algorithm='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(algorithm='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(algorithm='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
    # Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(
algorithm='sgd').partial_fit,
X, y,
classes=[2])
# l-bfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(algorithm='l-bfgs'), 'partial_fit'))
def test_params_errors():
    # Test that invalid parameters raise a ValueError.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(algorithm='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
    # Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multi():
    # Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
    # Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(random_state=1, hidden_layer_sizes=15)
mlp.fit(X, y)
pred1 = mlp.decision_function(X)
mlp.fit(X_sparse, y)
pred2 = mlp.decision_function(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the algorithm to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(algorithm='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, algorithm='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd',
learning_rate='adaptive', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
| bsd-3-clause |
rsivapr/scikit-learn | examples/svm/plot_svm_margin.py | 8 | 2297 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
pl.figure(fignum, figsize=(4, 3))
pl.clf()
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
pl.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=pl.cm.Paired)
pl.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
pl.figure(fignum, figsize=(4, 3))
pl.pcolormesh(XX, YY, Z, cmap=pl.cm.Paired)
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
fignum = fignum + 1
pl.show()
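# Hedged aside (editor's addition, not part of the original example): for a
# linear SVM the full margin width is 2 / ||w||, so the weakly regularized fit
# (C=1) ends up with a narrower margin than the strongly regularized one
# (C=0.05).  The helper name below is hypothetical and is never called above;
# it reuses the X, Y, svm and np names defined earlier in this script.
def _margin_width(penalty):
    clf = svm.SVC(kernel='linear', C=penalty)
    clf.fit(X, Y)
    return 2.0 / np.sqrt(np.sum(clf.coef_ ** 2))
# e.g. _margin_width(1) < _margin_width(0.05)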
| bsd-3-clause |
run2/citytour | 4symantec/Lib/site-packages/numpy-1.9.2-py2.7-win-amd64.egg/numpy/core/function_base.py | 23 | 6262 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import array, result_type
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
if dtype is None:
dtype = result_type(start, stop, float(num))
if num <= 0:
return array([], dtype)
if endpoint:
if num == 1:
return array([start], dtype=dtype)
step = (stop-start)/float((num-1))
y = _nx.arange(0, num, dtype=dtype) * step + start
y[-1] = stop
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num, dtype=dtype) * step + start
if retstep:
return y.astype(dtype), step
else:
return y.astype(dtype)
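# Hedged illustration (editor's addition, not part of the original module):
# the endpoint=True branch above is essentially ``start + arange(num) * step``
# with the last sample pinned to ``stop``.  The helper below is illustrative
# only and is never called.
def _demo_linspace_equivalence():
    import numpy as np
    start, stop, num = 2.0, 3.0, 5
    step = (stop - start) / (num - 1)
    manual = start + np.arange(num) * step
    manual[-1] = stop
    assert np.allclose(manual, np.linspace(start, stop, num))
    return manual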
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
| mit |
OGGM/oggm | oggm/tests/test_graphics.py | 2 | 13206 | import warnings
import pytest
import shutil
import os
import matplotlib.pyplot as plt
import numpy as np
salem = pytest.importorskip('salem')
gpd = pytest.importorskip('geopandas')
# Local imports
import oggm.utils
from oggm.tests import mpl_image_compare
from oggm.tests.funcs import (init_columbia_eb, init_hef,
get_test_dir, apply_test_ref_tstars)
from oggm import graphics
from oggm.core import (gis, inversion, climate, centerlines, flowline,
massbalance)
import oggm.cfg as cfg
from oggm.utils import get_demo_file
from oggm import utils, workflow
# Warnings
warnings.filterwarnings("ignore", category=UserWarning,
message=r'.*guessing baseline image.*')
# Globals
pytestmark = pytest.mark.test_env("graphics")
def setup_module():
graphics.set_oggm_cmaps(use_hcl=False)
def teardown_module():
graphics.set_oggm_cmaps()
# ----------------------------------------------------------
# Lets go
def test_surf_to_nan():
surf = np.array([1., 0, 0, 1])
thick = np.array([1, 0, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, 0, 1])
surf = np.array([1., 0, 0, 0, 1])
thick = np.array([1, 0, 0, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, np.NaN, 0, 1])
surf = np.array([1., 0, 0, 0, 0, 1])
thick = np.array([1, 0, 0, 0, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, np.NaN, np.NaN, 0, 1])
surf = np.array([1., 0, 1, 0, 1])
thick = np.array([1, 0, 1, 0, 1])
sh = graphics.surf_to_nan(surf, thick)
np.testing.assert_allclose(sh, [1, 0, 1, 0, 1])
@pytest.mark.internet
@pytest.mark.graphic
@mpl_image_compare(tolerance=25)
def test_googlemap():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_googlemap(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.internet
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_domain():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_domain(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_centerlines():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_centerlines(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_raster():
fig, ax = plt.subplots()
gdir = init_hef()
gis.gridded_attributes(gdir)
graphics.plot_raster(gdir, var_name='aspect', cmap='twilight', ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_flowlines():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_centerlines(gdir, ax=ax, use_flowlines=True)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_downstream():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_centerlines(gdir, ax=ax, add_downstream=True,
use_flowlines=True)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_width():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_catchment_width(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_width_corrected():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_catchment_width(gdir, ax=ax, corrected=True,
add_intersects=True,
add_touches=True)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_inversion():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_inversion(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.slow
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_multiple_inversion():
# test directory
testdir = os.path.join(get_test_dir(), 'tmp_mdir')
if not os.path.exists(testdir):
os.makedirs(testdir)
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PARAMS['border'] = 40
cfg.PARAMS['baseline_climate'] = 'CUSTOM'
cfg.PARAMS['trapezoid_lambdas'] = 1
cfg.PATHS['working_dir'] = testdir
apply_test_ref_tstars()
# Get the RGI ID
hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'
gdirs = workflow.init_glacier_directories(hef_rgi)
workflow.gis_prepro_tasks(gdirs)
workflow.climate_tasks(gdirs)
workflow.inversion_tasks(gdirs)
fig, ax = plt.subplots()
graphics.plot_inversion(gdirs, ax=ax)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_modelsection():
gdir = init_hef()
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
model = flowline.FlowlineModel(fls)
fig = plt.figure(figsize=(12, 6))
ax = fig.add_axes([0.07, 0.08, 0.7, 0.84])
graphics.plot_modeloutput_section(ax=ax, model=model)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_modelsection_withtrib():
gdir = init_hef()
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
model = flowline.FlowlineModel(fls)
fig = plt.figure(figsize=(14, 10))
graphics.plot_modeloutput_section_withtrib(fig=fig, model=model)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_modeloutput_map():
gdir = init_hef()
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
model = flowline.FlowlineModel(fls)
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdir, ax=ax, model=model)
fig.tight_layout()
return fig
@pytest.mark.slow
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_multiple_models():
# test directory
testdir = os.path.join(get_test_dir(), 'tmp_mdir')
utils.mkdir(testdir, reset=True)
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
cfg.PATHS['working_dir'] = testdir
cfg.PARAMS['baseline_climate'] = 'CUSTOM'
cfg.PARAMS['trapezoid_lambdas'] = 1
cfg.PARAMS['border'] = 40
apply_test_ref_tstars()
# Get the RGI ID
hef_rgi = gpd.read_file(get_demo_file('divides_hef.shp'))
hef_rgi.loc[0, 'RGIId'] = 'RGI50-11.00897'
gdirs = workflow.init_glacier_directories(hef_rgi)
workflow.gis_prepro_tasks(gdirs)
workflow.climate_tasks(gdirs)
workflow.inversion_tasks(gdirs)
models = []
for gdir in gdirs:
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
models.append(flowline.FlowlineModel(fls))
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_thick_alt():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_distributed_thickness(gdir, ax=ax,
varname_suffix='_alt')
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_thick_interp():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_distributed_thickness(gdir, ax=ax,
varname_suffix='_interp')
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_thick_elev_bands():
fig, ax = plt.subplots()
gdir = init_columbia_eb(dir_name='test_thick_eb')
workflow.inversion_tasks(utils.tolist(gdir))
inversion.distribute_thickness_per_altitude(gdir)
graphics.plot_distributed_thickness(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_catch_areas():
fig, ax = plt.subplots()
gdir = init_hef()
graphics.plot_catchment_areas(gdir, ax=ax)
fig.tight_layout()
return fig
@pytest.mark.slow
@pytest.mark.graphic
@mpl_image_compare()
def test_chhota_shigri():
testdir = os.path.join(get_test_dir(), 'tmp_chhota')
utils.mkdir(testdir, reset=True)
# Init
cfg.initialize()
cfg.PATHS['dem_file'] = get_demo_file('dem_chhota_shigri.tif')
cfg.PARAMS['border'] = 80
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['working_dir'] = testdir
cfg.PARAMS['trapezoid_lambdas'] = 1
hef_file = get_demo_file('divides_RGI50-14.15990.shp')
df = gpd.read_file(hef_file)
df['Area'] = df.Area * 1e-6 # cause it was in m2
df['RGIId'] = ['RGI50-14.15990' + d for d in ['_d01', '_d02']]
gdirs = workflow.init_glacier_directories(df)
workflow.gis_prepro_tasks(gdirs)
for gdir in gdirs:
climate.apparent_mb_from_linear_mb(gdir)
workflow.execute_entity_task(inversion.prepare_for_inversion, gdirs)
workflow.execute_entity_task(inversion.mass_conservation_inversion, gdirs)
workflow.execute_entity_task(inversion.filter_inversion_output, gdirs)
workflow.execute_entity_task(flowline.init_present_time_glacier, gdirs)
models = []
for gdir in gdirs:
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
models.append(flowline.FlowlineModel(fls))
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdirs, ax=ax, model=models)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.slow
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_ice_cap():
testdir = os.path.join(get_test_dir(), 'tmp_icecap')
utils.mkdir(testdir, reset=True)
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-05.08389.tif')
cfg.PARAMS['border'] = 60
cfg.PATHS['working_dir'] = testdir
cfg.PARAMS['trapezoid_lambdas'] = 1
df = gpd.read_file(get_demo_file('divides_RGI50-05.08389.shp'))
df['Area'] = df.Area * 1e-6 # cause it was in m2
df['RGIId'] = ['RGI50-05.08389_d{:02d}'.format(d+1) for d in df.index]
df['GlacType'] = '1099' # Make an ice cap
gdirs = workflow.init_glacier_directories(df)
workflow.gis_prepro_tasks(gdirs)
from salem import mercator_grid, Map
smap = mercator_grid((gdirs[0].cenlon, gdirs[0].cenlat),
extent=[20000, 23000])
smap = Map(smap)
fig, ax = plt.subplots()
graphics.plot_catchment_width(gdirs, ax=ax, add_intersects=True,
add_touches=True, smap=smap)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
@pytest.mark.slow
@pytest.mark.graphic
@mpl_image_compare(multi=True)
def test_coxe():
testdir = os.path.join(get_test_dir(), 'tmp_coxe')
utils.mkdir(testdir, reset=True)
# Init
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-01.10299.tif')
cfg.PARAMS['border'] = 40
cfg.PARAMS['clip_tidewater_border'] = False
cfg.PARAMS['use_multiple_flowlines'] = False
cfg.PARAMS['use_kcalving_for_inversion'] = True
cfg.PARAMS['use_kcalving_for_run'] = True
cfg.PARAMS['trapezoid_lambdas'] = 1
hef_file = get_demo_file('rgi_RGI50-01.10299.shp')
entity = gpd.read_file(hef_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=testdir, reset=True)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
climate.apparent_mb_from_linear_mb(gdir)
inversion.prepare_for_inversion(gdir)
inversion.mass_conservation_inversion(gdir)
inversion.filter_inversion_output(gdir)
flowline.init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
p = gdir.read_pickle('linear_mb_params')
mb_mod = massbalance.LinearMassBalance(ela_h=p['ela_h'],
grad=p['grad'])
mb_mod.temp_bias = -0.3
model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0,
inplace=True,
is_tidewater=True)
# run
model.run_until(200)
assert model.calving_m3_since_y0 > 0
fig, ax = plt.subplots()
graphics.plot_modeloutput_map(gdir, ax=ax, model=model)
fig.tight_layout()
shutil.rmtree(testdir)
return fig
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/fft/fftpack.py | 1 | 46020 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
from .helper import _FFTCache
_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
# We have to ensure that only a single thread can access a wsave array
# at any given time. Thus we remove it from the cache and insert it
# again after it has been used. Multiple threads might create multiple
# copies of the wsave array. This is intentional and a limitation of
# the current C code.
wsave = fft_cache.pop_twiddle_factors(n)
if wsave is None:
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)] * len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)] * len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache.put_twiddle_factors(n, wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a).astype(complex, copy=False)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
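# Hedged check (editor's addition, not part of the original module): the FFT
# above should agree with a naive O(n^2) evaluation of the DFT,
#     A_k = sum_m a_m * exp(-2*pi*i*m*k/n).
# The helper below is illustrative only and is never called at import time.
def _demo_naive_dft_check():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.rand(8) + 1j * rng.rand(8)
    n = len(a)
    m = np.arange(n)
    naive = np.array([np.sum(a * np.exp(-2j * np.pi * m * k / n))
                      for k in range(n)])
    assert np.allclose(naive, np.fft.fft(a))
    return naive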
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
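# Hedged check (editor's addition): as the Notes above state, for real input
# rfft returns only the non-negative-frequency half of the full transform.
# The helper below is illustrative only and is never called.
def _demo_rfft_is_half_fft():
    import numpy as np
    a = np.random.RandomState(1).rand(8)
    full = np.fft.fft(a)
    half = np.fft.rfft(a)
    assert np.allclose(half, full[:len(a) // 2 + 1])
    return half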
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
spectrum.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output. For `n` output
points, ``n//2 + 1`` input points are necessary. If the input is
longer than this, it is cropped. If it is shorter than this, it is
padded with zeros. If `n` is not given, it is determined from the
length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `numpy.fft`). Default is None.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*m - 2`` where ``m`` is the length of the transformed axis of
the input. To get an odd number of output points, `n` must be
        specified, for instance as ``2*m - 1`` in the typical case.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd.
    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT, the number of points along
transformation axis in the input to use. If `n` is smaller than
the length of the input, the input is cropped. If it is larger,
the input is padded with zeros. If `n` is not given, the length of
the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `numpy.fft`). Default is None.
.. versionadded:: 1.10.0
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n//2 + 1``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd:
    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
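# Hedged sketch (editor's addition, not part of the original module): an
# n-dimensional transform is just the 1-D transform applied successively along
# each requested axis, which is exactly what _raw_fftnd does above.  The
# helper below is illustrative only and is never called.
def _demo_fftn_as_repeated_fft():
    import numpy as np
    a = np.random.RandomState(2).rand(4, 6)
    step_by_step = np.fft.fft(np.fft.fft(a, axis=1), axis=0)
    assert np.allclose(step_by_step, np.fft.fftn(a))
    return step_by_step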
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes) - 1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
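    Examples
    --------
    A minimal sketch of a call (the exact printed formatting of the result
    may differ between numpy versions):
    >>> a = np.ones((2, 2))
    >>> np.fft.rfft2(a)
    array([[ 4.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j]])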
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes) - 1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
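    Examples
    --------
    A minimal sketch of the round trip with `rfft2` (the exact printed
    formatting may differ between numpy versions):
    >>> a = np.ones((2, 2))
    >>> np.fft.irfft2(np.fft.rfft2(a))
    array([[ 1.,  1.],
           [ 1.,  1.]])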
"""
return irfftn(a, s, axes, norm)
| mit |
unicef/rhizome | rhizome/tests/test_api_date_datapoint.py | 1 | 16157 |
from rhizome.tests.base_test_case import RhizomeApiTestCase
from django.contrib.auth.models import User
from rhizome.models.location_models import Location, LocationType, \
LocationPermission
from rhizome.models.indicator_models import Indicator
from rhizome.models.document_models import Document
from rhizome.models.datapoint_models import DataPoint
from rhizome.cache_meta import LocationTreeCache
from rhizome.tests.setup_helpers import TestSetupHelpers
import pandas as pd
from datetime import datetime
class DateDataPointResourceTest(RhizomeApiTestCase):
# python manage.py test rhizome.tests.test_api_datapoint_groupby_date --settings=rhizome.settings.test
def setUp(self):
super(DateDataPointResourceTest, self).setUp()
# Create a user.
self.username = 'john'
self.password = 'pass'
self.user = User.objects.create_user(self.username,\
'eradicate@polio.com', self.password)
self.lt = LocationType.objects.create(name='Country',admin_level = 0)
self.province_lt = LocationType.objects.create(name='Province'\
,admin_level = 1)
self.district_lt = LocationType.objects.create(name='District'\
,admin_level = 2)
self.ind = Indicator.objects.create(
name = 'Polio Cases',
short_name = 'Polio Cases',
data_format = 'date_int'
)
self.top_lvl_location = Location.objects.create(
name = 'Afghanistan',
location_code = 'Afghanistan',
id=1234,
location_type_id = self.lt.id,
)
self.some_province = Location.objects.create(
name = 'Province',
location_code = 'Province',
id=432,
parent_location_id = self.top_lvl_location.id,
location_type_id = self.province_lt.id
)
self.some_district = Location.objects.create(
name = 'Achin',
location_code = 'Achin',
id=4321,
parent_location_id = self.some_province.id,
location_type_id = self.district_lt.id
)
ltc = LocationTreeCache()
ltc.main()
LocationPermission.objects.create(user_id = self.user.id,\
top_lvl_location_id = self.top_lvl_location.id)
self.get_credentials()
self.create_polio_cases()
self.ts = TestSetupHelpers()
self.doc_id = Document.objects.create(doc_title='Data Entry').id
def create_polio_cases(self):
df = pd.read_csv('rhizome/tests/_data/AfgPolioCases.csv')
for ix, row in df.iterrows():
DataPoint.objects.create(
location_id = self.some_district.id,
indicator_id = self.ind.id,
data_date = datetime.strptime(row.data_date, '%d-%m-%y'),
value = 1,
source_submission_id = 1,
unique_index = str(self.some_district.id) + str(self.ind.id) +\
str(row.data_date)
)
def get_credentials(self):
result = self.api_client.client.login(username=self.username,
password=self.password)
return result
def test_get_list(self):
# python manage.py test rhizome.tests.test_api_datapoint_groupby_date.DateDataPointResourceTest.test_get_list --settings=rhizome.settings.test
get = {'group_by_time':'year',
'indicator__in' : self.ind.id,
'start_date' : '2013-01-01',
'end_date' : '2016-12-01',
'location_id' : self.top_lvl_location.id,
'location_depth' : 1
}
resp = self.api_client.get('/api/v1/date_datapoint/', \
format='json', data=get, authentication=self.get_credentials())
response_data = self.deserialize(resp)
self.assertHttpOK(resp)
objects = response_data['objects']
meta = response_data['meta']
## does the 'meta' object have what the FE needs
self.assertEqual(self.ind.id, int(meta['indicator_ids'][0]))
# self.assertEqual(self.top_lvl_location.id, int(meta['location_ids'][0]))
## WE SHOULD REMOVE THIS LOGIC FROM FE -- DATES ARE SEPARATE FROM CAMPAIGNS
self.assertEqual(set(meta['time_groupings']),set([2014,2015,2016]))
self.assertEqual(3, len(objects)) # one for each year #
case_dict = {}
for obj in objects:
case_dict[obj['time_grouping']] = float(obj['value'])
self.assertEqual(28.00, case_dict[2014])
self.assertEqual(20.00, case_dict[2015])
self.assertEqual(3.0, case_dict[2016])
# basic test to just get a datapoint at a location for which we have data
def test_get_list_no_recursion(self):
# python manage.py test rhizome.tests.test_api_datapoint_groupby_date.DateDataPointResourceTest.test_get_list_no_recursion --settings=rhizome.settings.test
location_id = 4321
get = {
'group_by_time' :'all_time',
'indicator__in': self.ind.id,
'start_date': '2013-01-01',
'end_date': '2016-01-01',
'location_id': location_id,
'location_depth' : 0
}
resp = self.api_client\
.get('/api/v1/date_datapoint/',
data = get,
format = 'json',
authentication = self.get_credentials())
self.assertHttpOK(resp)
response_data = self.deserialize(resp)
dps_all_time = DataPoint.objects.filter(indicator_id=self.ind.id)
total_all_time = 0
for dp in dps_all_time:
total_all_time += dp.value
self.assertEqual(len(response_data['objects']), 1)
self.assertEqual(int(response_data['objects'][0]['location_id']), location_id)
# not sure if this is a bug or what, but start and end date seem to be irrelevant when using group_by_time
def test_get_list_diff_start_end_dates(self):
get = {
'group_by_time' :'year',
'indicator__in': self.ind.id,
'start_date': '2013-01-01',
'end_date': '2016-01-01',
'location_id__in': self.top_lvl_location.id,
'location_depth' : 1
}
resp = self.api_client.get('/api/v1/date_datapoint/',
data = get, format='json', authentication=self.get_credentials())
self.assertHttpOK(resp)
response_data = self.deserialize(resp)
objects_1 = response_data['objects']
get_2 = {
'group_by_time' :'year',
'indicator__in': self.ind.id,
'start_date': '2016-01-01',
'end_date': '2016-01-01',
'location_id__in': self.top_lvl_location.id,
'location_depth' : 1
}
resp_2 = self.api_client.get('/api/v1/date_datapoint/',\
data = get_2, format='json',\
authentication=self.get_credentials())
self.assertHttpOK(resp_2)
response_data_2 = self.deserialize(resp_2)
objects_2 = response_data_2['objects']
self.assertEqual(len(objects_1), len(objects_2))
def test_get_list_quarter_and_all_time(self):
get = {
'group_by_time' :'quarter',
'indicator__in': self.ind.id,
'start_date': '2013-01-01',
'end_date': '2016-07-01',
'location_id': self.top_lvl_location.id,
'location_depth' : 1
}
resp = self.api_client.get('/api/v1/date_datapoint/', \
data = get , format='json', authentication=self.get_credentials())
response_data = self.deserialize(resp)
self.assertHttpOK(resp)
dps_q1_2014 = DataPoint.objects.filter(
data_date__range=('2014-01-01', '2014-03-31'),\
indicator = self.ind.id
)
total = 0
for dp in dps_q1_2014:
total += dp.value
# find the total for q1 2014
q1_found = False
for indicator in response_data['objects']:
campaign = indicator['time_grouping']
if campaign == '20141':
value = float(indicator['value'])
self.assertEqual(value, total)
q1_found = True
self.assertTrue(q1_found)
get_2 = {
'group_by_time' :'all_time',
'indicator__in': self.ind.id,
'start_date': '2013-01-01',
'end_date': '2016-07-01',
'location_id': self.top_lvl_location.id,
'location_depth' : 1
}
resp = self.api_client.get('/api/v1/date_datapoint/', \
data = get_2, format='json',\
authentication=self.get_credentials())
response_data = self.deserialize(resp)
dps_all_time = DataPoint.objects.filter(indicator_id=self.ind.id)
total_all_time = 0
for dp in dps_all_time:
total_all_time += dp.value
self.assertEqual(len(response_data['objects']), 1)
self.assertEqual(float(response_data['objects'][0]['value']), total_all_time)
# provide a non-existent id
def test_get_list_bogus_id(self):
get = {
'group_by_time' :'quarter',
'indicator__in': 3223,
'start_date': '2013-01-01',
'end_date': '2016-01-01',
'location_id__in': self.top_lvl_location.id,
'location_depth' : 1
}
resp = self.api_client.get('/api/v1/date_datapoint/', \
data = get, format='json', authentication=self.get_credentials())
self.assertHttpOK(resp)
response_data = self.deserialize(resp)
self.assertEqual(len(response_data['objects']), 0)
def test_get_list_wrong_grouping(self):
'''
What happens when we request an unsupported time grouping
'''
# python manage.py test rhizome.tests.test_api_datapoint_groupby_date.DateDataPointResourceTest.test_get_list_wrong_grouping --settings=rhizome.settings.test
get = {
'group_by_time' :'xxx',
'indicator__in': self.ind.id,
'start_date': '2013-01-01',
'end_date': '2016-01-01',
'location_id': self.top_lvl_location.id,
'location_depth' : 1
}
resp = self.api_client.get('/api/v1/date_datapoint/',data = get,\
format='json', authentication=self.get_credentials())
self.deserialize(resp)
self.assertHttpApplicationError(resp)
def _show_missing_data(self):
'''
This test is not in the suite because for date_datapoint results, the back end should not
be in charge of creating every possible datapoint
        with a null value in order to handle discontinuity.
        show_missing_data should not be a parameter; we should remove it and handle the fallout in the front-end charting library.
'''
# python manage.py test rhizome.tests.test_api_datapoint_groupby_date.DateDataPointResourceTest.test_show_missing_data --settings=rhizome.settings.test
rando_ind = Indicator.objects.create(
name = 'some other damn indicator',
short_name = 'we don\'t care!',
data_format = 'int'
)
rando_ind_2 = Indicator.objects.create(
name = 'some other indicator',
short_name = 'we don care!',
data_format = 'int'
)
# ind_list = [rando_ind.id, rando_ind_2.id]
ind_list = '{0},{1}'.format(rando_ind.id, rando_ind_2.id)
get = {
'group_by_time' :'year',
'indicator__in': ind_list,
'start_date': '2013-01-01',
'end_date': '2016-01-01',
'location_id__in': self.top_lvl_location.id,
'location_depth' : 0,
'show_missing_data': 1
}
resp = self.api_client.get('/api/v1/date_datapoint/', \
data = get , format='json',\
authentication=self.get_credentials())
response_data = self.deserialize(resp)
self.assertHttpOK(resp)
self.assertEqual(len(response_data['objects']), 6)
## should be one object for the location, for each Indicator
## for each location and each time grouping.
## 3 yrs * 2 indicators * one location = 6
## if location_depth = 1, the number would have to take into
## account the number of sub locations one step under the parent
def test_patch_date_datapoint(self):
'''
create a datapoint with the ORM, submit a PATCH request and see
if the value changed.
        If the user tries to change anything except the value, there should be
an error.
'''
dp_to_patch = DataPoint.objects.all()[0]
patch_data = {'value': 101.01}
dp_url = '/api/v1/date_datapoint/%s/' % dp_to_patch.id
## submit the patch and make sure it has the proper response code
resp = self.ts.patch(self, dp_url, data=patch_data)
self.assertHttpAccepted(resp)
        ## now get the dp and see if the value has been updated ##
dp_to_patch = DataPoint.objects.get(id=dp_to_patch.id)
self.assertEqual(dp_to_patch.value, patch_data['value'])
def test_post_date_datapoint(self):
'''
post a record to the datapoint table
'''
indicator_id = Indicator.objects.all()[0].id
location_id = Location.objects.all()[0].id
val = 10.0
data = {
'indicator_id': indicator_id,
'data_date': '2016-01-01',
'location_id': location_id,
'value': val
}
resp = self.ts.post(self, '/api/v1/date_datapoint/', data)
self.assertHttpCreated(resp)
response_data = self.deserialize(resp)
self.assertEqual(response_data['value'], val)
another_date = '2016-02-02'
data['data_date'] = another_date
resp = self.ts.post(self, '/api/v1/date_datapoint/', data)
self.assertHttpCreated(resp)
response_data = self.deserialize(resp)
self.assertEqual(response_data['data_date'], another_date)
def test_post_date_datapoint_missing_data(self):
'''
        if we do not have all the keys we need, throw an error
'''
data = {'value': 10}
resp = self.ts.post(self, '/api/v1/date_datapoint/', data)
self.assertHttpApplicationError(resp)
def test_post_date_datapoint_invalid_data(self):
'''
        The indicator and campaign don't exist; the API should tell us
'''
data = {
# 'document_id': doc_id,
'indicator_id': 4324,
'time_grouping': 32132123,
'location_id': 4321,
'value': 10
}
resp = self.ts.post(self, '/api/v1/date_datapoint/', data)
self.assertHttpApplicationError(resp)
response_data = self.deserialize(resp)
def test_delete_date_datapoint(self):
'''
create a datapoint, then delete it, make sure that it is no longer
there.
'''
dp = DataPoint.objects.all()[0]
delete_url = '/api/v1/date_datapoint/%d/' % dp.id
resp = self.ts.delete(self, delete_url)
## now make sure that it is not there #
dpc_query = DataPoint.objects.filter(id=dp.id)
self.assertEqual(len(dpc_query), 0)
def test_get_date_datapoint_by_id(self):
'''
Here we get one object from the API and ensure it has the proper
data from when we inserted it.
'''
dp_obj = DataPoint.objects.all()[0]
resp = self.ts.get(self, '/api/v1/date_datapoint/%s/' % dp_obj.id)
self.assertHttpOK(resp)
response_data = self.deserialize(resp)
self.assertEqual(response_data['value'], dp_obj.value)
| agpl-3.0 |
jimsrc/seatos | etc/n_CR/individual/run_brute.py | 1 | 5136 | #!/usr/bin/env ipython
from pylab import *
#from load_data import sh, mc, cr
import func_data as fd
import share.funcs as ff
#import CythonSrc.funcs as ff
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from os import environ as env
from os.path import isfile, isdir
from h5py import File as h5
#++++++++++++++++++++++++++++++++++++++++++++++++++++
class Lim:
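    # Parameter range for the brute-force grid search: [min, max] sampled with
    # n points; delta() is the corresponding step used to build the slice
    # objects passed to the fitter below.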
def __init__(self, min_, max_, n):
self.min = min_
self.max = max_
self.n = n
def delta(self):
return (self.max-self.min) / (1.0*self.n)
dir_inp_sh = '{dir}/sheaths.icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
dir_inp_mc = '{dir}/icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_sh = '{dir}/sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_mc = '{dir}/mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
fname_inp_part = 'MCflag0.1.2.2H_2before.4after_fgap0.2_WangNaN' # '_vlo.100.0.vhi.375.0_CRs.Auger_BandScals.txt'
#fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
#CRstr = 'CRs.Auger_BandScals'
#CRstr = 'CRs.Auger_BandMuons'
CRstr = 'CRs.Auger_scals'
mgr = fd.mgr_data(dir_inp_sh, dir_inp_mc, fname_inp_part)
#sh, mc, cr = mgr.run(vlo=100.0, vhi=375.0, CRstr=CRstr)
sh, mc, cr = mgr.run(vlo=375.0, vhi=450.0, CRstr=CRstr)
#sh, mc, cr = mgr.run(vlo=450.0, vhi=3000.0, CRstr=CRstr)
dir_dst = '../out/individual'
fname_fig = '{dir}/_nCR_vlo.{lo:4.1f}.vhi.{hi:4.1f}_{name}.png' .format(dir=dir_dst, lo=mgr.vlo, hi=mgr.vhi, name=CRstr)
fname_out = '{dir}/_nCR_vlo.{lo:5.1f}.vhi.{hi:4.1f}_{name}.h5' .format(dir=dir_dst, lo=mgr.vlo, hi=mgr.vhi, name=CRstr)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#-- mc:
mc.cc = (mc.t>0.0) & (mc.t<=2.0)
mc.tout = 3.0*mc.t[mc.cc]+1.0
mc.rms = mc.rmsB[mc.cc]
mc.B = mc.B[mc.cc]
cr.mc.crs = cr.mc.avr[mc.cc]
#-- sheath
sh.cc = sh.t<1.0
sh.tout = sh.t[sh.cc]
sh.rms = sh.rmsB[sh.cc]
sh.B = sh.B[sh.cc]
cr.sh.crs = cr.sh.avr[sh.cc]
tpre = 0.0 #-1.0 # time before which data is taken for the quiet-SW rms
rms_o = np.mean(sh.rms[sh.t<tpre]) #0.06 #0.025 #np.mean(sh.rms[sh.t<-1.0]) #0.03
t = np.concatenate([sh.tout, mc.tout])
rms = np.concatenate([sh.rms, mc.rms])
B = np.concatenate([sh.B, mc.B])
crs = np.concatenate([cr.sh.crs, cr.mc.crs])
org_t = t.copy()
org_crs = crs.copy()
t, rms, crs, B = t[t>=0.0], rms[t>=0.0], crs[t>=0.0], B[t>=0.0]
dt = t[1:-1] - t[0:-2]
cte = 0.0
#--- 'fc' is the piecewise version of 'rms'
cc = ((rms-rms_o)>=0.0) & (t<5.0)
fc = np.zeros(rms.size)
fc[cc] = (rms-rms_o)[cc]
b = B
#++++++++++++++++++++++++++++++++++++++++++++++++ fit
#--- seeds (initial guesses)
tau_, q_, off_ = 5., -6., 0.1 #2.0, -400.0
bp_, bo_ = -0.1, 10.0
#--- parameter boundaries && number of evaluations
nbin = 20 #5
tau = Lim(0.1, 5., n=nbin)
q = Lim(-20., -0.1, n=nbin)
off = Lim(0., 1., n=nbin)
bp = Lim(-1., 0., n=nbin)
bo = Lim(0., 20., n=nbin)
#--- slice object
rranges = (
slice(tau.min, tau.max, tau.delta()),
slice(q.min, q.max, q.delta()),
slice(off.min, off.max, off.delta()),
slice(bp.min, bp.max, bp.delta()),
slice(bo.min, bo.max, bo.delta()),
)
#--- start && run the fitter
data = np.array([t, fc, crs, b])
fit = ff.fit_forbush(data, [tau_, q_, off_, bp_, bo_])
fit.make_fit_brute(rranges)
print fit.par
#--- output to hdf5
fo = h5(fname_out, 'w')
for pname in fit.par.keys():
fo[pname] = fit.par[pname]
#--- save the exploration grid
fo['grids/tau'] = [tau.min, tau.max, tau.delta(), tau.n]
fo['grids/q'] = [q.min, q.max, q.delta(), q.n]
fo['grids/off'] = [off.min, off.max, off.delta(), off.n]
fo['grids/bp'] = [bp.min, bp.max, bp.delta(), bp.n]
fo['grids/bo'] = [bo.min, bo.max, bo.delta(), bo.n]
#------------------
#++++++++++++++++++++++++++++++++++++++++++++++++ figure
fig = figure(1, figsize=(6,3.))
ax = fig.add_subplot(111)
ncr = ff.nCR2([t, fc, b], **fit.par)
sqr = np.nanmean(np.square(crs - ncr))
#--- left plot
ax.plot(org_t, org_crs, '-o', c='gray', ms=3)
ax.plot(t, ncr, '-', c='red', lw=5, alpha=0.8, label='$\\{tau:3.3g}$'.format(**fit.par))
#++++ sheath region (orange)
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=1, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
#++++ MC region (blue)
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
rect1 = patches.Rectangle((1., 0.), width=3, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
ax.plot(t, crs, '-o', c='k', ms=3)
#ax.axhline(y=0.0, c='g')
ax.grid()
ax.set_xlabel('time normalized to sheath/MC passage [1]', fontsize=14)
ax.set_ylabel('$n_{CR}$ [%]', fontsize=21)
ax.set_ylim(-1., 0.5)
savefig(fname_fig, dpi=135, bbox_inches='tight')
print " ---> generamos: " + fname_fig
close()
#EOF
| mit |
AmurG/tardis | tardis/util.py | 7 | 13796 | # Utilities for TARDIS
from astropy import units as u, constants, units
import numpy as np
import os
import yaml
import re
import logging
import atomic
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return 'Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") - supplied %s' % self.malformed_element_symbol
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return 'Expecting an atomic symbol (e.g. Fe) - supplied %s' % self.malformed_element_symbol
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return 'Expecting a quantity string(e.g. "5 km/s") for keyword - supplied %s' % self.malformed_quantity_string
logger = logging.getLogger(__name__)
synpp_default_yaml_fname = os.path.join(os.path.dirname(__file__), 'data', 'synpp_default.yaml')
def int_to_roman(input):
"""
from http://code.activestate.com/recipes/81611-roman-numerals/
Convert an integer to Roman numerals.
Examples:
>>> int_to_roman(0)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(-1)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(1.5)
Traceback (most recent call last):
TypeError: expected integer, got <type 'float'>
>>> for i in range(1, 21): print int_to_roman(i)
...
I
II
III
IV
V
VI
VII
VIII
IX
X
XI
XII
XIII
XIV
XV
XVI
XVII
XVIII
XIX
XX
>>> print int_to_roman(2000)
MM
>>> print int_to_roman(1999)
MCMXCIX
"""
input = int(input)
if type(input) != type(1):
raise TypeError, "expected integer, got %s" % type(input)
if not 0 < input < 4000:
raise ValueError, "Argument must be between 1 and 3999"
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
def roman_to_int(input):
"""
from http://code.activestate.com/recipes/81611-roman-numerals/
Convert a roman numeral to an integer.
>>> r = range(1, 4000)
>>> nums = [int_to_roman(i) for i in r]
>>> ints = [roman_to_int(n) for n in nums]
>>> print r == ints
1
>>> roman_to_int('VVVIV')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: VVVIV
>>> roman_to_int(1)
Traceback (most recent call last):
...
TypeError: expected string, got <type 'int'>
>>> roman_to_int('a')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: A
>>> roman_to_int('IL')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: IL
"""
if type(input) != type(""):
raise TypeError, "expected string, got %s" % type(input)
input = input.upper()
nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
ints = [1000, 500, 100, 50, 10, 5, 1]
places = []
for c in input:
if not c in nums:
raise ValueError, "input is not a valid roman numeral: %s" % input
for i in range(len(input)):
c = input[i]
value = ints[nums.index(c)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(input[i +1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
sum = 0
for n in places: sum += n
# Easiest test for validity...
if int_to_roman(sum) == input:
return sum
else:
raise ValueError, 'input is not a valid roman numeral: %s' % input
def calculate_luminosity(spec_fname, distance, wavelength_column=0, wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
logger.warning('Currently only works with Si and a special setup')
if not radial1d_mdl.atom_data.has_synpp_refs:
raise ValueError(
            'The current atom dataset does not contain the necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].ix[key] = np.log10(
radial1d_mdl.plasma_array.tau_sobolevs[0].ix[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
yaml_reference = yaml.load(file(synpp_default_yaml_fname))
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(radial1d_mdl.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(radial1d_mdl.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float((radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float((radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
yaml.dump(yaml_reference, stream=file(fname, 'w'), explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
        I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}{e^{h\\nu \\beta_\\textrm{rad}} - 1}
"""
beta_rad = 1 / (k_B_cgs * T)
return (2 * (h_cgs * nu ** 3) / (c_cgs ** 2)) / (
np.exp(h_cgs * nu * beta_rad) - 1)
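# Illustrative use of the helper above (the numbers are assumptions, cgs units
# throughout): the specific intensity of a ~5800 K blackbody at an optical
# frequency of 5e14 Hz would be intensity_black_body(5e14, 5800.0).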
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
    approach is to make, for each point, a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
def species_tuple_to_string(species_tuple, roman_numerals=True):
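    # e.g. (26, 1) -> 'Fe II' when roman_numerals is True, or 'Fe 1' otherwise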
atomic_number, ion_number = species_tuple
element_symbol = atomic.atomic_number2symbol[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '%s %s' % (element_symbol, roman_ion_number)
else:
return '%s %d' % (element_symbol, ion_number)
def species_string_to_tuple(species_string):
try:
element_symbol, ion_number_string = re.match('^(\w+)\s*(\d+)', species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError('Species string "{0}" is not of format <element_symbol><number> '
'(e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError("Given ion number ('{}') could not be parsed ".format(ion_number_string))
if ion_number > atomic_number:
raise ValueError('Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
def parse_quantity(quantity_string):
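    # Parse strings such as '5 km/s' into an astropy Quantity; anything that
    # does not split into a float followed by a unit string raises
    # MalformedQuantityError.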
if not isinstance(quantity_string, basestring):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
def element_symbol2atomic_number(element_string):
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in atomic.symbol2atomic_number:
raise MalformedElementSymbolError(element_string)
return atomic.symbol2atomic_number[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string symbol
"""
return atomic.atomic_number2symbol[atomic_number]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent letters lowercase
Parameters
----------
    element_string : str
    Returns
    -------
    reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/api/date_demo.py | 3 | 1686 | #!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
All matplotlib date plotting is done by converting date instances into
days since the 0001-01-01 UTC. The conversion, tick locating and
formatting is done behind the scenes so this is most transparent to
you. The dates module provides several converter functions date2num
and num2date
"""
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
# load a numpy record array from yahoo csv data with fields date,
# open, close, volume, adj_close from the mpl-data/example directory.
# The record array stores python datetime.date as an object array in
# the date column
datafile = cbook.get_sample_data('goog.npy')
r = np.load(datafile).view(np.recarray)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(r.date, r.adj_close)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
datemin = datetime.date(r.date.min().year, 1, 1)
datemax = datetime.date(r.date.max().year+1, 1, 1)
ax.set_xlim(datemin, datemax)
# format the coords message box
def price(x): return '$%1.2f'%x
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = price
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
plt.show()
| mit |
cdegroc/scikit-learn | examples/svm/plot_rbf_parameters.py | 1 | 1705 | """
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters `gamma`
and `C` of the rbf kernel SVM.
Intuitively, the `gamma` parameter defines how far the influence
of a single training example reaches, with low values meaning 'far'
and high values meaning 'close'.
The `C` parameter trades off misclassification of training examples
against simplicity of the decision surface. A low C makes
the decision surface smooth, while a high C aims at classifying
all training examples correctly.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn import svm
from sklearn.datasets import load_iris
from sklearn.preprocessing import Scaler
iris = load_iris()
X = iris.data[:, :2] # Take only 2 dimensions
y = iris.target
X = X[y > 0]
y = y[y > 0]
y -= 1
scaler = Scaler()
X = scaler.fit_transform(X)
xx, yy = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200))
np.random.seed(0)
gamma_range = [10. ** -1, 1, 10. ** 1]
C_range = [10. ** -2, 1, 10. ** 2]
pl.figure()
k = 1
for C in C_range:
for gamma in gamma_range:
# fit the model
clf = svm.SVC(gamma=gamma, C=C)
clf.fit(X, y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
pl.subplot(3, 3, k)
pl.title("gamma %.1f, C %.2f" % (gamma, C))
k += 1
pl.pcolormesh(xx, yy, -Z, cmap=pl.cm.jet)
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.jet)
pl.xticks(())
pl.yticks(())
pl.axis('tight')
pl.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
pl.show()
| bsd-3-clause |
shincling/MemNN_and_Varieties | MemN2N_python/Unknown_networks/unknown_main.py | 1 | 29900 | # -*- coding: utf8 -*-
from __future__ import division
import argparse
import glob
import lasagne
import numpy as np
import theano
import theano.tensor as T
import time
from sklearn import metrics
from sklearn.preprocessing import LabelBinarizer,label_binarize
class SimpleAttentionLayer(lasagne.layers.MergeLayer):
def __init__(self, incomings, vocab, embedding_size,enable_time, W_h, W_q,W_o, nonlinearity=lasagne.nonlinearities.tanh, **kwargs):
        super(SimpleAttentionLayer, self).__init__(incomings, **kwargs) # the super() call initializes the MergeLayer base, which provides the input_layers and input_shapes attributes
if len(incomings) != 2:
raise NotImplementedError
batch_size, max_sentlen ,embedding_size = self.input_shapes[0]
self.batch_size,self.max_sentlen,self.embedding_size=batch_size,max_sentlen,embedding_size
self.W_h=self.add_param(W_h,(embedding_size,embedding_size), name='Attention_layer_W_h')
self.W_q=self.add_param(W_q,(embedding_size,embedding_size), name='Attention_layer_W_q')
self.W_o=self.add_param(W_o,(embedding_size,), name='Attention_layer_W_o')
self.nonlinearity=nonlinearity
zero_vec_tensor = T.vector()
self.zero_vec = np.zeros(embedding_size, dtype=theano.config.floatX)
# self.set_zero = theano.function([zero_vec_tensor], updates=[(x, T.set_subtensor(x[0, :], zero_vec_tensor)) for x in [self.A,self.C]])
def get_output_shape_for(self, input_shapes):
return (self.batch_size,self.embedding_size)
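    # Additive attention over the sentence: alpha = softmax(W_o . tanh(H W_h + q W_q)),
    # then the hidden states H are pooled as a weighted sum with weights alpha.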
def get_output_for(self, inputs, **kwargs):
#input[0]:(BS,max_senlen,emb_size),input[1]:(BS,1,emb_size)
activation0=(T.dot(inputs[0],self.W_h))
activation1=T.dot(inputs[1],self.W_q).reshape([self.batch_size,self.embedding_size]).dimshuffle(0,'x',1)
activation=self.nonlinearity(activation0+activation1)#.dimshuffle(0,'x',2)#.repeat(self.max_sentlen,axis=1)
final=T.dot(activation,self.W_o) #(BS,max_sentlen)
alpha=lasagne.nonlinearities.softmax(final) #(BS,max_sentlen)
final=T.batched_dot(alpha,inputs[0])#(BS,max_sentlen)*(BS,max_sentlen,emb_size)--(BS,emb_size)
return final
# TODO:think about the set_zero
def reset_zero(self):
self.set_zero(self.zero_vec)
class SimplePointerLayer(lasagne.layers.MergeLayer):
def __init__(self, incomings, vocab, embedding_size,enable_time, W_h, W_q,W_o, nonlinearity=lasagne.nonlinearities.tanh,**kwargs):
        super(SimplePointerLayer, self).__init__(incomings, **kwargs) # the super() call initializes the MergeLayer base, which provides the input_layers and input_shapes attributes
if len(incomings) != 3:
raise NotImplementedError
# if mask_input is not None:
# incomings.append(mask_input)
batch_size, max_sentlen ,embedding_size = self.input_shapes[0]
self.batch_size,self.max_sentlen,self.embedding_size=batch_size,max_sentlen,embedding_size
self.W_h=self.add_param(W_h,(embedding_size,embedding_size), name='Pointer_layer_W_h')
self.W_q=self.add_param(W_q,(embedding_size,embedding_size), name='Pointer_layer_W_q')
self.W_o=self.add_param(W_o,(embedding_size,), name='Pointer_layer_W_o')
self.nonlinearity=nonlinearity
zero_vec_tensor = T.vector()
self.zero_vec = np.zeros(embedding_size, dtype=theano.config.floatX)
# self.set_zero = theano.function([zero_vec_tensor], updates=[(x, T.set_subtensor(x[0, :], zero_vec_tensor)) for x in [self.A,self.C]])
def get_output_shape_for(self, input_shapes):
return (self.batch_size,self.max_sentlen)
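    # Same additive attention scoring as in SimpleAttentionLayer, but the
    # (mask-aware) softmax weights alpha are returned directly as a pointer
    # distribution over sentence positions instead of pooling the hidden states.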
def get_output_for(self, inputs, **kwargs):
#input[0]:(BS,max_senlen,emb_size),input[1]:(BS,1,emb_size),input[2]:(BS,max_sentlen)
activation0=(T.dot(inputs[0],self.W_h))
activation1=T.dot(inputs[1],self.W_q).reshape([self.batch_size,self.embedding_size]).dimshuffle(0,'x',1)
activation=self.nonlinearity(activation0+activation1)#.dimshuffle(0,'x',2)#.repeat(self.max_sentlen,axis=1)
final=T.dot(activation,self.W_o) #(BS,max_sentlen)
if inputs[2] is not None:
final=inputs[2]*final-(1-inputs[2])*1000000
alpha=lasagne.nonlinearities.softmax(final) #(BS,max_sentlen)
# final=T.batched_dot(alpha,inputs[0])#(BS,max_sentlen)*(BS,max_sentlen,emb_size)--(BS,emb_size)
return alpha
# TODO:think about the set_zero
def reset_zero(self):
self.set_zero(self.zero_vec)
class TmpMergeLayer(lasagne.layers.MergeLayer):
def __init__(self,incomings,W_merge_r,W_merge_q, nonlinearity=lasagne.nonlinearities.tanh,**kwargs):
        super(TmpMergeLayer, self).__init__(incomings, **kwargs)  # super() sets up input_layers and input_shapes from incomings
if len(incomings) != 2:
raise NotImplementedError
batch_size,embedding_size=self.input_shapes[0]
self.W_merge_r=self.add_param(W_merge_r,(embedding_size,embedding_size),name='MergeLayer_w_r')
self.W_merge_q=self.add_param(W_merge_q,(embedding_size,embedding_size),name='MergeLayer_w_q')
self.batch_size,self.embedding_size=batch_size,embedding_size
self.nonlinearity=nonlinearity
def get_output_shape_for(self, input_shapes):
return self.input_shapes[0]
def get_output_for(self, inputs, **kwargs):
h_r,h_q=inputs[0],inputs[1] # h_r:(BS,emb_size),h_q:(BS,1,emb_size)
# result=T.dot(self.W_merge_r,h_r)+T.dot(self.W_merge_q,h_q).reshape((self.batch_size,self.embedding_size))
result=T.dot(h_r,self.W_merge_r)+T.dot(h_q,self.W_merge_q).reshape((self.batch_size,self.embedding_size))
return result
class TransposedDenseLayer(lasagne.layers.DenseLayer):
def __init__(self, incoming, num_units,embedding_size,vocab_size, W_final_softmax=lasagne.init.GlorotUniform(),
b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.rectify,
**kwargs):
super(TransposedDenseLayer, self).__init__(incoming,num_units, name='softmax_layer_w',**kwargs)
# self.W_final_softmax=self.add_param(W_final_softmax,(embedding_size,num_units),name='softmax_layer_w')
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.num_units)
def get_output_for(self, input, **kwargs):
if input.ndim > 2:
input = input.flatten(2)
activation = T.dot(input, self.W)
if self.b is not None:
activation = activation + self.b.dimshuffle('x', 0)
return self.nonlinearity(activation)
class Model:
def __init__(self, train_file, test_file, batch_size=32, embedding_size=20, max_norm=40, lr=0.01, num_hops=3, adj_weight_tying=True, linear_start=True, enable_time=False,pointer_nn=False,optimizer='sgd',enable_mask=True,std_rate=0.1,**kwargs):
train_lines, test_lines = self.get_lines(train_file), self.get_lines(test_file)
        lines = np.concatenate([train_lines, test_lines], axis=0)  # concatenate train and test lines end to end
vocab, word_to_idx, idx_to_word, max_sentlen = self.get_vocab(lines)
        # S is the list of documents, Q the list of question words, Y the list of answers;
        # at this point everything is still in string form, not yet vectorized.
        self.data = {'train': {}, 'test': {}}  # one dict per split
S_train, self.data['train']['Q'], self.data['train']['Y'] = self.process_dataset(train_lines, word_to_idx, max_sentlen, offset=0)
S_test, self.data['test']['Q'], self.data['test']['Y'] = self.process_dataset(test_lines, word_to_idx, max_sentlen)
S = np.concatenate([np.zeros((1, max_sentlen), dtype=np.int32), S_train, S_test], axis=0)
self.data['train']['S'],self.data['test']['S']=S_train,S_test
for i in range(min(10,len(self.data['test']['Y']))):
for k in ['S', 'Q', 'Y']:
print k, self.data['test'][k][i]
print 'batch_size:', batch_size, 'max_sentlen:', max_sentlen
print 'sentences:', S.shape
print 'vocab size:', len(vocab)
for d in ['train', 'test']:
print d,
for k in ['S', 'Q', 'Y']:
print k, self.data[d][k].shape,
print ''
vocab=[]
for i in range(len(idx_to_word)):
vocab.append(idx_to_word[i+1])
idx_to_word[0]='#'
word_to_idx['#']=0
lb = LabelBinarizer()
self.enable_time=enable_time
self.optimizer=optimizer
self.batch_size = batch_size
self.max_sentlen = max_sentlen if not enable_time else max_sentlen+1
self.embedding_size = embedding_size
self.num_classes = len(vocab) + 1
self.vocab = vocab
self.lb = lb
self.init_lr = lr
self.lr = self.init_lr
self.max_norm = max_norm
self.S = S
self.idx_to_word = idx_to_word
self.nonlinearity = None if linear_start else lasagne.nonlinearities.softmax
self.word_to_idx=word_to_idx
self.pointer_nn=pointer_nn
self.std=std_rate
self.enable_mask=enable_mask
self.build_network()
def build_network(self):
batch_size, max_sentlen, embedding_size, vocab, enable_time = self.batch_size, self.max_sentlen, self.embedding_size, self.vocab,self.enable_time
s = T.imatrix()
# q = T.ivector()
q = T.imatrix()
y = T.imatrix()
mask= T.imatrix()# if self.enable_mask else None
# c_pe = T.tensor4()
# q_pe = T.tensor4()
self.s_shared = theano.shared(np.zeros((batch_size, max_sentlen), dtype=np.int32), borrow=True)
self.mask_shared = theano.shared(np.zeros((batch_size, max_sentlen), dtype=np.int32), borrow=True)
self.q_shared = theano.shared(np.zeros((batch_size, 1), dtype=np.int32), borrow=True)
        '''targets for the final softmax layer'''
self.a_shared = theano.shared(np.zeros((batch_size, self.num_classes), dtype=np.int32), borrow=True)
        # S_shared = theano.shared(self.S, borrow=True)  # S holds both the train and test sentences
l_context_in = lasagne.layers.InputLayer(shape=(batch_size, max_sentlen))
l_mask_in = lasagne.layers.InputLayer(shape=(batch_size, max_sentlen))
l_question_in = lasagne.layers.InputLayer(shape=(batch_size,1))
w_emb=lasagne.init.Normal(std=self.std)
l_context_emb = lasagne.layers.EmbeddingLayer(l_context_in,self.num_classes,embedding_size,W=w_emb,name='sentence_embedding') #(BS,max_sentlen,emb_size)
l_question_emb= lasagne.layers.EmbeddingLayer(l_question_in,self.num_classes,embedding_size,W=l_context_emb.W,name='question_embedding') #(BS,1,d)
# w_emb_query=lasagne.init.Normal(std=self.std)
# l_question_emb= lasagne.layers.EmbeddingLayer(l_question_in,self.num_classes,embedding_size,W=w_emb_query,name='question_embedding') #(BS,1,d)
        l_context_rnn_f=lasagne.layers.LSTMLayer(l_context_emb,embedding_size,name='context_lstm_f',mask_input=l_mask_in,backwards=False) #(BS,max_sentlen,emb_size)
        l_context_rnn_b=lasagne.layers.LSTMLayer(l_context_emb,embedding_size,name='context_lstm_b',mask_input=l_mask_in,backwards=True) #(BS,max_sentlen,emb_size)
# l_context_rnn_f=lasagne.layers.GRULayer(l_context_emb,embedding_size,name='context_lstm',mask_input=l_mask_in,backwards=False) #(BS,max_sentlen,emb_size)
# l_context_rnn_b=lasagne.layers.GRULayer(l_context_emb,embedding_size,name='context_lstm',mask_input=l_mask_in,backwards=True) #(BS,max_sentlen,emb_size)
l_context_rnn=lasagne.layers.ElemwiseSumLayer((l_context_rnn_f,l_context_rnn_b))
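        # Bidirectional encoding: the forward and backward LSTM states are combined by
        # element-wise sum, so the contextual representation stays of size embedding_size
        # (rather than 2*embedding_size as concatenation would give).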
w_h,w_q,w_o=lasagne.init.Normal(std=self.std),lasagne.init.Normal(std=self.std),lasagne.init.Normal(std=self.std)
        # The layer below attends over the document using the question and returns the
        # document representation for the current question: a weighted average of shape (BS, emb_size).
if not self.pointer_nn:
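            # Classification head: attend over the document with the question, merge the
            # attended vector with the question representation, and predict the answer with
            # a dense softmax over the whole vocabulary, trained with a cross-entropy loss.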
l_context_attention=SimpleAttentionLayer((l_context_rnn,l_question_emb),vocab, embedding_size,enable_time, W_h=w_h, W_q=w_q,W_o=w_o, nonlinearity=lasagne.nonlinearities.tanh)
w_merge_r,w_merge_q=lasagne.init.Normal(std=self.std),lasagne.init.Normal(std=self.std)
l_merge=TmpMergeLayer((l_context_attention,l_question_emb),W_merge_r=w_merge_r,W_merge_q=w_merge_q, nonlinearity=lasagne.nonlinearities.tanh)
w_final_softmax=lasagne.init.Normal(std=self.std)
# l_pred = TransposedDenseLayer(l_merge, self.num_classes,embedding_size=embedding_size,vocab_size=self.num_classes,W_final_softmax=w_final_softmax, b=None, nonlinearity=lasagne.nonlinearities.softmax)
l_pred = lasagne.layers.DenseLayer(l_merge, self.num_classes, W=w_final_softmax, b=None, nonlinearity=lasagne.nonlinearities.softmax,name='l_final')
probas=lasagne.layers.helper.get_output(l_pred,{l_context_in:s,l_question_in:q,l_mask_in:mask})
probas = T.clip(probas, 1e-7, 1.0-1e-7)
pred = T.argmax(probas, axis=1)
cost = T.nnet.binary_crossentropy(probas, y).sum()
else :
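            # Pointer head: SimplePointerLayer outputs a softmax over the document positions
            # (padding positions masked out); the target is the one-hot position of the answer
            # word in the document, trained with categorical cross-entropy.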
l_context_pointer=SimplePointerLayer((l_context_rnn,l_question_emb,l_mask_in),vocab, embedding_size,enable_time, W_h=w_h, W_q=w_q,W_o=w_o, nonlinearity=lasagne.nonlinearities.tanh)
l_pred=l_context_pointer
probas=lasagne.layers.helper.get_output(l_pred,{l_context_in:s,l_question_in:q,l_mask_in:mask})
probas = T.clip(probas, 1e-7, 1.0-1e-7)
pred = T.argmax(probas, axis=1)
cost = T.nnet.categorical_crossentropy(probas, y).sum()
# cost = cost*batch_size/mask.sum()*10
pass
params = lasagne.layers.helper.get_all_params(l_pred, trainable=True)
print 'params:', params
grads = T.grad(cost, params)
scaled_grads = lasagne.updates.total_norm_constraint(grads, self.max_norm)
if self.optimizer=='sgd':
updates = lasagne.updates.sgd(scaled_grads, params, learning_rate=self.lr)
elif self.optimizer=='adagrad':
updates = lasagne.updates.adagrad(scaled_grads, params, learning_rate=self.lr)
else:
updates = lasagne.updates.adadelta(scaled_grads, params, learning_rate=self.lr)
givens = {
s: self.s_shared,
q: self.q_shared,
y: self.a_shared,
mask: self.mask_shared
}
# test_output=lasagne.layers.helper.get_output(l_context_attention,{l_context_in:s,l_question_in:q}).flatten().sum()
# self.train_model1 = theano.function([],test_output, givens=givens,on_unused_input='warn' )
self.train_model = theano.function([], cost, givens=givens, updates=updates)
self.compute_pred = theano.function([], outputs= [probas,pred], givens=givens, on_unused_input='ignore')
zero_vec_tensor = T.vector()
self.zero_vec = np.zeros(embedding_size, dtype=theano.config.floatX)
self.set_zero = theano.function([zero_vec_tensor], updates=[(x, T.set_subtensor(x[0, :], zero_vec_tensor)) for x in [l_context_emb.W]])
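        # set_zero resets row 0 of the embedding matrix (the padding index) to zeros; it is
        # called after every parameter update so that padded positions never contribute.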
def get_lines(self, fname):
        lines = []  # each element is a dict
for i, line in enumerate(open(fname)):
ll=line.split('\t')
id = int(ll[0])
sentence=ll[1]
question=ll[2][0:ll[2].find(' ')]
answer=ll[3].strip()
lines.append({'id':id,'sentence':sentence,'question':question,'target':answer})
return np.array(lines)
    def get_vocab(self, lines):  # preprocessing: build the vocabulary and find the max sentence length
vocab = set()
max_sentlen = 0
for i, line in enumerate(lines):
#words = nltk.word_tokenize(line['text'])
            words=line['sentence'].split(' ')  # simple whitespace split instead of nltk.word_tokenize
max_sentlen = max(max_sentlen, len(words))
for w in words:
vocab.add(w)
vocab.add(line['question'])
vocab.add(line['target'])
word_to_idx = {}
for w in vocab:
word_to_idx[w] = len(word_to_idx) + 1
idx_to_word = {}
for w, idx in word_to_idx.iteritems():
idx_to_word[idx] = w
return vocab, word_to_idx, idx_to_word, max_sentlen
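    # Indexing convention used throughout: real words get indices starting at 1, and index 0
    # is reserved for padding (the Model constructor maps it to the dummy token '#'), so a
    # padded sentence looks like, say, [12, 7, 3, 0, 0, ...] (the numbers are only illustrative).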
def process_dataset(self,lines, word_to_idx, max_sentlen, offset=0):
S,Q,Y=[],[],[]
for i, line in enumerate(lines):
word_indices = [word_to_idx[w] for w in line['sentence'].split(' ')]
            word_indices += [0] * (max_sentlen - len(word_indices))  # zero-pad the sentence to max_sentlen
S.append(word_indices)
Q.append([word_to_idx[line['question']]])
Y.append(word_to_idx[line['target']])
return np.array(S),np.array(Q),np.array(Y)
def set_shared_variables(self, dataset, index,enable_time):
c = np.zeros((self.batch_size, self.max_sentlen), dtype=np.int32)
# mask = np.zeros((self.batch_size, self.max_sentlen), dtype=np.int32)
q = np.zeros((self.batch_size, 1), dtype=np.int32)
y = np.zeros((self.batch_size, self.num_classes), dtype=np.int32)
indices = range(index*self.batch_size, (index+1)*self.batch_size)
for i, row in enumerate(dataset['S'][indices]):
row = row[:self.max_sentlen]
c[i, :len(row)] = row
mask=np.int32(c!=0) #if self.enable_mask else None
        q[:len(indices),:] = dataset['Q'][indices]  # question word indices for this batch
        '''build the one-hot answer matrix (over the vocabulary) for this batch'''
        # y[:len(indices), 1:self.num_classes] = self.lb.transform(dataset['Y'][indices])
        y[:len(indices), 1:self.num_classes] = label_binarize([self.idx_to_word[i] for i in dataset['Y'][indices]],self.vocab)  # one-hot encode each answer over the vocabulary
        # y[:len(indices), 1:self.embedding_size] = self.mem_layers[0].A[[self.word_to_idx(i) for i in list(dataset['Y'][indices])]]
self.s_shared.set_value(c)
self.mask_shared.set_value(mask)
self.q_shared.set_value(q)
self.a_shared.set_value(y)
def set_shared_variables_pointer(self, dataset, index,enable_time):
c = np.zeros((self.batch_size, self.max_sentlen), dtype=np.int32)
q = np.zeros((self.batch_size, 1), dtype=np.int32)
y = np.zeros((self.batch_size, self.max_sentlen), dtype=np.int32)
indices = range(index*self.batch_size, (index+1)*self.batch_size)
for i, row in enumerate(dataset['S'][indices]):
row = row[:self.max_sentlen]
c[i, :len(row)] = row
mask=np.int32(c!=0) #if self.enable_mask else None
        q[:len(indices),:] = dataset['Q'][indices]  # question word indices for this batch
        '''build the one-hot answer-position matrix for this batch'''
        # y[:len(indices), 1:self.num_classes] = self.lb.transform(dataset['Y'][indices])
        # y[:len(indices), 1:self.max_sentlen] = label_binarize([self.idx_to_word[i] for i in dataset['Y'][indices]],self.vocab)
        for i in range(len(indices)):
            # the target is the (one-hot) position of the answer word inside its own sentence;
            # indices[i] selects this minibatch's example, matching how c and q are filled above
            one_hot=label_binarize([dataset['Y'][indices[i]]],dataset['S'][indices[i]])
            y[i,:]=one_hot
        # y[:len(indices), 1:self.embedding_size] = self.mem_layers[0].A[[self.word_to_idx(i) for i in list(dataset['Y'][indices])]]
self.s_shared.set_value(c)
self.mask_shared.set_value(mask)
self.q_shared.set_value(q)
self.a_shared.set_value(y)
def train(self, n_epochs=100, shuffle_batch=False):
epoch = 0
n_train_batches = len(self.data['train']['Y']) // self.batch_size
self.lr = self.init_lr
prev_train_f1 = None
while (epoch < n_epochs):
epoch += 1
            if epoch % 50 == 0:  # halve the learning rate every 50 epochs
self.lr /= 2.0
indices = range(n_train_batches)
if shuffle_batch:
                self.shuffle_sync(self.data['train'])  # shuffle S, Q and Y together, keeping them aligned
total_cost = 0
start_time = time.time()
# print 'TRAIN', '=' * 40
# train_f1, train_errors = self.compute_f1(self.data['train'])
# print 'TRAIN_ERROR:', (1-train_f1)*100
for minibatch_index in indices:#一次进入一个batch的数据
if not self.pointer_nn:
                    self.set_shared_variables(self.data['train'], minibatch_index,self.enable_time)  # copy this minibatch into the model's shared variables
                else:
                    self.set_shared_variables_pointer(self.data['train'], minibatch_index,self.enable_time)  # copy this minibatch into the model's shared variables
total_cost += self.train_model()
# print self.train_model1()
                self.set_zero(self.zero_vec)  # reset the embedding row of the padding index (index 0) back to zeros
end_time = time.time()
print '\n' * 3, '*' * 80
print 'epoch:', epoch, 'cost:', (total_cost / len(indices)), ' took: %d(s)' % (end_time - start_time)
print 'TRAIN', '=' * 40
if not self.pointer_nn:
train_f1, train_errors = self.compute_f1(self.data['train'])
else:
train_f1, train_errors = self.compute_f1_pointer(self.data['train'])
print 'TRAIN_ERROR:', (1-train_f1)*100
if False:
for i, pred in train_errors[:10]:
print 'context: ', self.to_words(self.data['train']['S'][i])
print 'question: ', self.to_words([self.data['train']['Q'][i]])
print 'correct answer: ', self.data['train']['Y'][i]
print 'predicted answer: ', pred
print '---' * 20
            '''switch from the linear-start phase to softmax activations'''
if False and prev_train_f1 is not None and train_f1 < prev_train_f1 and self.nonlinearity is None:
print 'The linearity ends.××××××××××××××××××\n\n'
prev_weights = lasagne.layers.helper.get_all_param_values(self.network)
self.build_network(nonlinearity=lasagne.nonlinearities.softmax)
lasagne.layers.helper.set_all_param_values(self.network, prev_weights)
else:
print 'TEST', '=' * 40
if not self.pointer_nn:
                    test_f1, test_errors = self.compute_f1(self.data['test'])  # odd: the f1 score and test_errors do not always seem to match up
                else:
                    test_f1, test_errors = self.compute_f1_pointer(self.data['test'])  # odd: the f1 score and test_errors do not always seem to match up
print 'test_f1,test_errors:',test_f1,len(test_errors)
print '*** TEST_ERROR:', (1-test_f1)*100
                if 50 < epoch:
for i, pred in test_errors[:10]:
print 'context: ', self.to_words(self.data['test']['S'][i],'S')
print 'question: ', self.to_words([self.data['test']['Q'][i]],'Q')
print 'correct answer: ', self.to_words(self.data['test']['Y'][i],'Y')
print 'predicted answer: ', self.idx_to_word[self.data['test']['S'][i][pred]]
print '---' * 20
prev_train_f1 = train_f1
def predict(self, dataset, index):
# if not self.set_shared_variables_pointer:
if not self.pointer_nn:
self.set_shared_variables(dataset, index,self.enable_time)
else:
self.set_shared_variables_pointer(dataset, index,self.enable_time)
result=self.compute_pred()
# print 'probas:{}\n'.format(index)
# print result[0]
return result[1]
def compute_f1(self, dataset):
n_batches = len(dataset['Y']) // self.batch_size
# TODO: find out why not -1
y_pred = np.concatenate([self.predict(dataset, i) for i in xrange(n_batches)]).astype(np.int32) #- 1
# y_true = [self.vocab.index(y) for y in dataset['Y'][:len(y_pred)]]
y_true = dataset['Y'][:len(y_pred)]
# print metrics.confusion_matrix(y_true, y_pred)
# print metrics.classification_report(y_true, y_pred)
errors = []
for i, (t, p) in enumerate(zip(y_true, y_pred)):
if t != p:
# errors.append((i, self.lb.classes_[p]))
errors.append((i, self.vocab[p]))
pass
return metrics.f1_score(y_true, y_pred, average='weighted', pos_label=None), errors
def compute_f1_pointer(self, dataset):
n_batches = len(dataset['Y']) // self.batch_size
# TODO: find out why not -1
y_pred = np.concatenate([self.predict(dataset, i) for i in xrange(n_batches)]).astype(np.int32) #- 1
# y_true = [self.vocab.index(y) for y in dataset['Y'][:len(y_pred)]]
# y_true = dataset['Y'][:len(y_pred)]
y_true=[]
for i in range(len(y_pred)):
y_true.append(list(dataset['S'][i]).index(dataset['Y'][i]))
# print metrics.confusion_matrix(y_true, y_pred)
# print metrics.classification_report(y_true, y_pred)
errors = []
for i, (t, p) in enumerate(zip(y_true, y_pred)):
if t != p:
# print t,p
# errors.append((i, self.lb.classes_[p]))
# errors.append((i, self.vocab[p]))
errors.append((i, p))
pass
return metrics.f1_score(y_true, y_pred, average='weighted', pos_label=None), errors
def shuffle_sync(self, dataset):
p = np.random.permutation(len(dataset['Y']))
for k in ['S', 'Q', 'Y']:
dataset[k] = dataset[k][p]
def to_words(self, indices,ty):
# sents = []
# for idx in indices:
# words = ' '.join([self.idx_to_word[idx] for idx in self.S[idx] if idx > 0])
# words = ' '.join([self.idx_to_word[idx] for i in idx)
# sents.append(words)
# return ' '.join(sents)
sent = ''
if ty =='S':
for idx in indices:
sent+=self.idx_to_word[idx]
sent+=' '
elif ty =='Q':
for idx in indices[0]:
sent+=self.idx_to_word[idx]
sent+=' '
elif ty =='Y':
sent=self.idx_to_word[indices]
return sent
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1')
def main():
parser = argparse.ArgumentParser()
parser.register('type', 'bool', str2bool)
parser.add_argument('--task', type=int, default=35, help='Task#')
parser.add_argument('--train_file', type=str, default='', help='Train file')
parser.add_argument('--test_file', type=str, default='', help='Test file')
parser.add_argument('--back_method', type=str, default='sgd', help='Train Method to bp')
parser.add_argument('--batch_size', type=int, default=1000, help='Batch size')
parser.add_argument('--embedding_size', type=int, default=100, help='Embedding size')
parser.add_argument('--max_norm', type=float, default=40.0, help='Max norm')
parser.add_argument('--lr', type=float, default=0.03, help='Learning rate')
parser.add_argument('--num_hops', type=int, default=3, help='Num hops')
parser.add_argument('--linear_start', type='bool', default=True, help='Whether to start with linear activations')
parser.add_argument('--shuffle_batch', type='bool', default=True, help='Whether to shuffle minibatches')
parser.add_argument('--n_epochs', type=int, default=500, help='Num epochs')
    parser.add_argument('--enable_time', type='bool', default=False, help='time word embedding')
    parser.add_argument('--pointer_nn', type='bool', default=True, help='Whether to use the pointer networks')
    parser.add_argument('--enable_mask', type='bool', default=True, help='Whether to use the mask')
parser.add_argument('--std_rate',type=float,default=0.5,help='The std number for the Noraml init')
args = parser.parse_args()
if args.train_file == '' or args.test_file == '':
args.train_file = glob.glob('*_real_train.txt' )[0]
args.test_file = glob.glob('*_real_test.txt' )[0]
# args.train_file = glob.glob('*_onlyName_train.txt' )[0]
# args.test_file = glob.glob('*_onlyName_test.txt' )[0]
# args.train_file = glob.glob('*_sent_train.txt' )[0]
# args.test_file = glob.glob('*_sent_test.txt' )[0]
# args.train_file = glob.glob('*_toy_train.txt' )[0]
# args.test_file = glob.glob('*_toy_test.txt' )[0]
# args.train_file = '/home/shin/DeepLearning/MemoryNetwork/MemN2N_python/MemN2N-master/data/en/qqq_train.txt'
# args.test_file ='/home/shin/DeepLearning/MemoryNetwork/MemN2N_python/MemN2N-master/data/en/qqq_test.txt'
print '*' * 80
print 'args:', args
print '*' * 80
model = Model(**args.__dict__)
model.train(n_epochs=args.n_epochs, shuffle_batch=args.shuffle_batch)
if __name__ == '__main__':
main() | bsd-3-clause |
zihua/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 70 | 7486 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Authors: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.exceptions import ConvergenceWarning
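# Note: this example assumes an older scikit-learn release; RandomizedLasso,
# lasso_stability_path and sklearn.utils.extmath.pinvh were deprecated and later
# removed from scikit-learn.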
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
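# Informally, this is the largest l1-norm of the rows of
# X_irrelevant^T X_relevant (X_relevant^T X_relevant)^+, i.e. how well each irrelevant
# column can be expressed in terms of the relevant ones; values well below 1 correspond
# to the irrepresentable condition under which the Lasso can recover the exact support.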
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
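    # The rows of X start as i.i.d. standard Gaussian vectors; multiplying by the Cholesky
    # factor U (where the original block matrix equals U.T U) gives rows whose covariance
    # is the block correlation structure built above.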
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
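    # lasso_stability_path refits a randomized Lasso on many random subsamples of the data
    # (with randomly rescaled penalties); for each alpha, a feature's stability score is
    # the fraction of resamplings in which it was selected.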
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and makes it easier
    # to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |