repo_name | path | copies | size | content | license
---|---|---|---|---|---
bzero/statsmodels | examples/python/glm.py | 29 | 3989 |
## Generalized Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
from scipy import stats
from matplotlib import pyplot as plt
# ## GLM: Binomial response data
#
# ### Load data
#
# In this example, we use the Star98 dataset which was taken with permission
# from Jeff Gill (2000) Generalized linear models: A unified approach. Codebook
# information can be obtained by typing:
print(sm.datasets.star98.NOTE)
# Load the data and add a constant to the exogenous (independent) variables:
data = sm.datasets.star98.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# The dependent variable is N by 2 (Success: NABOVE, Failure: NBELOW):
print(data.endog[:5,:])
# The independent variables include all the other variables described above, as
# well as the interaction terms:
print(data.exog[:2,:])
# ### Fit and summary
glm_binom = sm.GLM(data.endog, data.exog, family=sm.families.Binomial())
res = glm_binom.fit()
print(res.summary())
# ### Quantities of interest
print('Total number of trials:', data.endog[0].sum())
print('Parameters: ', res.params)
print('T-values: ', res.tvalues)
# First differences: We hold all explanatory variables constant at their means and manipulate the percentage of low income households to assess its impact on the response variables:
means = data.exog.mean(axis=0)
means25 = means.copy()
means25[0] = stats.scoreatpercentile(data.exog[:,0], 25)
means75 = means.copy()
means75[0] = lowinc_75per = stats.scoreatpercentile(data.exog[:,0], 75)
resp_25 = res.predict(means25)
resp_75 = res.predict(means75)
diff = resp_75 - resp_25
# The interquartile first difference for the percentage of low income households in a school district is:
print("%2.4f%%" % (diff*100))
# ### Plots
#
# We extract information that will be used to draw some interesting plots:
nobs = res.nobs
y = data.endog[:,0]/data.endog.sum(1)
yhat = res.mu
# Plot yhat vs y:
from statsmodels.graphics.api import abline_plot
fig, ax = plt.subplots()
ax.scatter(yhat, y)
line_fit = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()
abline_plot(model_results=line_fit, ax=ax)
ax.set_title('Model Fit Plot')
ax.set_ylabel('Observed values')
ax.set_xlabel('Fitted values');
# Plot yhat vs. Pearson residuals:
fig, ax = plt.subplots()
ax.scatter(yhat, res.resid_pearson)
ax.hlines(0, 0, 1)
ax.set_xlim(0, 1)
ax.set_title('Residual Dependence Plot')
ax.set_ylabel('Pearson Residuals')
ax.set_xlabel('Fitted values')
# Histogram of standardized deviance residuals:
from scipy import stats
fig, ax = plt.subplots()
resid = res.resid_deviance.copy()
resid_std = stats.zscore(resid)
ax.hist(resid_std, bins=25)
ax.set_title('Histogram of standardized deviance residuals');
# QQ Plot of Deviance Residuals:
from statsmodels import graphics
graphics.gofplots.qqplot(resid, line='r')
# ## GLM: Gamma for proportional count response
#
# ### Load data
#
# In the example above, we printed the ``NOTE`` attribute to learn about the
# Star98 dataset. Statsmodels datasets ship with other useful information. For
# example:
print(sm.datasets.scotland.DESCRLONG)
# Load the data and add a constant to the exogenous variables:
data2 = sm.datasets.scotland.load()
data2.exog = sm.add_constant(data2.exog, prepend=False)
print(data2.exog[:5,:])
print(data2.endog[:5])
# ### Fit and summary
glm_gamma = sm.GLM(data2.endog, data2.exog, family=sm.families.Gamma())
glm_results = glm_gamma.fit()
print(glm_results.summary())
# ## GLM: Gaussian distribution with a noncanonical link
#
# ### Artificial data
nobs2 = 100
x = np.arange(nobs2)
np.random.seed(54321)
X = np.column_stack((x,x**2))
X = sm.add_constant(X, prepend=False)
lny = np.exp(-(.03*x + .0001*x**2 - 1.0)) + .001 * np.random.rand(nobs2)
# ### Fit and summary
gauss_log = sm.GLM(lny, X, family=sm.families.Gaussian(sm.families.links.log))
gauss_log_results = gauss_log.fit()
print(gauss_log_results.summary())
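# (Added comparison, not part of the original example; ``ols_on_log`` is a new
# name.) A GLM with a log link models log(E[y]), whereas OLS on the logged
# response models E[log(y)]; fitting both makes the distinction concrete:
ols_on_log = sm.OLS(np.log(lny), X).fit()
print(ols_on_log.summary())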
| bsd-3-clause |
466152112/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
dhalleine/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 5 | 8987 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nn.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) #in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
| apache-2.0 |
shangwuhencc/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
Eric89GXL/mne-python | examples/connectivity/plot_mne_inverse_label_connectivity.py | 13 | 7516 | """
=========================================================================
Compute source space connectivity and visualize it using a circular graph
=========================================================================
This example computes the all-to-all connectivity between 68 regions in
source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions in the axial plane.
"""
# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
print(__doc__)
###############################################################################
# Load our data
# -------------
#
# First we'll load the data we'll use in connectivity estimation. We'll use
# the sample MEG data provided with MNE.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
###############################################################################
# Compute inverse solutions and their connectivity
# ------------------------------------------------
#
# Next, we need to compute the inverse solution for this data. This will return
# the sources / source activity that we'll use in computing connectivity. We'll
# compute the connectivity in the alpha band of these sources. We can specify
# particular frequencies to include in the connectivity with the ``fmin`` and
# ``fmax`` flags. Notice from the status messages how mne-python:
#
# 1. reads an epoch from the raw file
# 2. applies SSP and baseline correction
# 3. computes the inverse to obtain a source estimate
# 4. averages the source estimate to obtain a time series for each label
# 5. includes the label time series in the connectivity computation
# 6. moves to the next epoch.
#
# This behaviour is because we are using generators. Since we only need to
# operate on the data one epoch at a time, using a generator allows us to
# compute connectivity in a computationally efficient manner where the amount
# of memory (RAM) needed is independent from the number of epochs.
# Compute the inverse solution for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
subjects_dir=subjects_dir)
label_colors = [label.color for label in labels]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq'] # the sampling frequency
con_methods = ['pli', 'wpli2_debiased', 'ciplv']
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# con is a 3D array, get the connectivity for the first (and only) freq. band
# for each method
con_res = dict()
for method, c in zip(con_methods, con):
con_res[method] = c[:, :, 0]
###############################################################################
# Make a connectivity plot
# ------------------------
#
# Now, we visualize this connectivity using a circular graph layout.
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
# Get the y-location of the label
label_ypos = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) / 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)')
###############################################################################
# Make two connectivity plots in the same figure
# ----------------------------------------------
#
# We can also assign these connectivity plots to axes in a figure. Below we'll
# show the connectivity plot using two different connectivity methods.
fig = plt.figure(num=None, figsize=(8, 4), facecolor='black')
no_names = [''] * len(label_names)
for ii, method in enumerate(con_methods):
plot_connectivity_circle(con_res[method], no_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title=method, padding=0, fontsize_colorbar=6,
fig=fig, subplot=(1, 3, ii + 1))
plt.show()
###############################################################################
# Save the figure (optional)
# --------------------------
#
# By default matplotlib does not save using the facecolor, even though this was
# set when the figure was generated. If not set via savefig, the labels, title,
# and legend will be cut off from the output png file.
# fname_fig = data_path + '/MEG/sample/plot_inverse_connect.png'
# fig.savefig(fname_fig, facecolor='black')
| bsd-3-clause |
camallen/aggregation | experimental/condor/gain_per_user.py | 2 | 7166 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
import random
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from divisiveKmeans import DivisiveKmeans
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-11']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
to_sample_from = [u'ACW0000kxt', u'ACW00006p8', u'ACW0000bxt', u'ACW0002005', u'ACW000120u', u'ACW00006rc', u'ACW00040az', u'ACW0000m08', u'ACW0000az7', u'ACW000055u', u'ACW0000df0', u'ACW00006ld', u'ACW00011nb', u'ACW000180h', u'ACW0000k15', u'ACW0005ghc', u'ACW0000bl4', u'ACW00013hc', u'ACW0002t1k', u'ACW0000cu2', u'ACW00014ia', u'ACW00003ac', u'ACW00014vp', u'ACW0000nkd', u'ACW0003nyl', u'ACW0004k9y', u'ACW00012q9', u'ACW00011yg', u'ACW0000ozm', u'ACW00011hz', u'ACW000128j', u'ACW00006k6', u'ACW00012ha', u'ACW00007dn', u'ACW0004bp1', u'ACW00044cs', u'ACW0000lrr', u'ACW00015xo', u'ACW0000ddn', u'ACW0002g7h', u'ACW00053o5', u'ACW000127z', u'ACW0003zyk', u'ACW0001826', u'ACW0001evk', u'ACW0004feb', u'ACW0000jql', u'ACW0001hpb', u'ACW0000kw0', u'ACW00011gq', u'ACW00004vc', u'ACW00047sq', u'ACW000554b', u'ACW000181m', u'ACW0000k7q', u'ACW0000e6i', u'ACW0004jxu', u'ACW00011is', u'ACW00027lo', u'ACW0000lu1', u'ACW000130c', u'ACW0000le4', u'ACW000160y', u'ACW00051os', u'ACW0003y9q', u'ACW0004nra', u'ACW0002vj8', u'ACW00041en', u'ACW00057p7', u'ACW0002qps', u'ACW0000apl', u'ACW00007cw', u'ACW00018m9', u'ACW0005m6l', u'ACW00055cy', u'ACW00012xz', u'ACW0003yd6', u'ACW0000xdt', u'ACW0000pd9', u'ACW00003tq', u'ACW00011g4', u'ACW0000bv7', u'ACW00010ol', u'ACW000491z', u'ACW0000xf4', u'ACW000116t', u'ACW00002r7', u'ACW0000jw1', u'ACW00009lo', u'ACW000410t', u'ACW00003l5', u'ACW0002izy', u'ACW0000jt4', u'ACW00043gl', u'ACW00011wh', u'ACW0000ao8', u'ACW00048dl', u'ACW000036e', u'ACW0000m4n', u'ACW0003skl', u'ACW0000ijv', u'ACW0004s2k', u'ACW00011hn', u'ACW0000a2d', u'ACW0005ds7', u'ACW000138e', u'ACW0002sgv', u'ACW00006mc', u'ACW0003tvy', u'ACW000191i', u'ACW000037x', u'ACW0001sz7', u'ACW0004p03', u'ACW00003th', u'ACW00011ey', u'ACW0005e1z', u'ACW00008ax', u'ACW0003k73', u'ACW0000o4m', u'ACW00012gy', u'ACW00012j5', u'ACW0004iml', u'ACW0005anw', u'ACW0000jkb', u'ACW0000b4c', u'ACW0004tvd', u'ACW0000569', u'ACW00016p6', u'ACW0005f1n', u'ACW0005f5w', u'ACW0000lsm', u'ACW00003km', u'ACW0004e2v', u'ACW0004dt0', u'ACW00041nj', u'ACW0000396', u'ACW00013ni', u'ACW0003uar', u'ACW0005ck9', u'ACW0000dd6', u'ACW0004mno', u'ACW00007b9', u'ACW0005n2h', u'ACW00011di', u'ACW00033m4', u'ACW00006jl', u'ACW0000at6', u'ACW0000e13', u'ACW0001612', u'ACW0004e6m', u'ACW000030f', u'ACW0000xfq', u'ACW00012ag', u'ACW00033em', u'ACW0000aw8', u'ACW00011js', u'ACW0000auq', u'ACW0001235', u'ACW0004qkt', u'ACW0000s1g', u'ACW0000mac', u'ACW00011zg', u'ACW00013mn', u'ACW0000ms9', u'ACW0004ijh', u'ACW0005ff4', u'ACW00011na', u'ACW0000pd3', u'ACW0001234', u'ACW00057hs', u'ACW0000lr6', u'ACW0000kko', u'ACW0004s6n', u'ACW0001b1c', u'ACW0003v83', u'ACW000138l', u'ACW000030u', u'ACW0000boq', u'ACW00047pv', u'ACW00054bm', u'ACW0004ehj', u'ACW0000b8l', u'ACW0003s9d', u'ACW00003b2', u'ACW00041cn', u'ACW0000dxs', u'ACW00011qs', u'ACW0004leg', u'ACW00012t3', u'ACW0000arl', u'ACW0005ev1', u'ACW00039vc', u'ACW0001t23', u'ACW0000jxm', u'ACW0003c0h', u'ACW00041ba', u'ACW0003v1j', u'ACW00011j7', u'ACW0000nyy', u'ACW0000br8', u'ACW0000xe4', u'ACW000460a', u'ACW0004ezy', u'ACW00003jx']
to_ignore_1 = ["ACW0002005","ACW0000m08","ACW0000az7","ACW000055u","ACW00014vp","ACW0000nkd","ACW0003nyl","ACW0000jql","ACW0000k7q","ACW0000e6i","ACW0000lu1","ACW0002qps","ACW00003tq","ACW00009lo","ACW0000jt4","ACW0000m4n","ACW00003th","ACW0000o4m","ACW00033m4","ACW0000s1g","ACW0000pd3","ACW0000kko","ACW00039vc","ACW0003c0h"]
to_ignore_2 = ["ACW0004feb","ACW0002vj8","ACW00012xz","ACW0000pd9","ACW0000xf4","ACW0002izy","ACW0000569","ACW0000dd6","ACW0000at6","ACW0001b1c","ACW0001t23","ACW00003jx"]
steps = [2,5,20]
condor_count_2 = {k:[] for k in steps}
condor_count_3 = {k:[] for k in steps}
for subject_count,zooniverse_id in enumerate(to_sample_from):
if (zooniverse_id in to_ignore_1) or (zooniverse_id in to_ignore_2):
continue
print zooniverse_id
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
annotation_list = []
user_markings = {k:[] for k in steps}
user_list = {k:[] for k in steps}
type_list = {k:[] for k in steps}
for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
for animal in markings.values():
scale = 1.875
x = scale*float(animal["x"])
y = scale*float(animal["y"])
for s in steps:
if user_index < s:
#only add the animal if its type is given and it is a condor
try:
animal_type = animal["animal"]
if animal_type == "condor":
user_markings[s].append((x,y))
user_list[s].append(user_index)
type_list[s].append(animal_type)
except KeyError:
pass
except ValueError:
pass
#do the divisive k means for each step
for s in steps:
#print s
if user_markings[s] == []:
condor_count_3[s].append(0.)
condor_count_2[s].append(0.)
else:
identified_animals,clusters = DivisiveKmeans(3).fit2(user_markings[s],user_list[s],debug=True)
condor_count_3[s].append(float(len(identified_animals)))
if s != 20:
identified_animals,clusters = DivisiveKmeans(2).fit2(user_markings[s],user_list[s],debug=True)
condor_count_2[s].append(float(len(identified_animals)))
for threshold in [5,10]:
print len([c for c in condor_count_3[20] if c <= threshold])/float(len(condor_count_3[20]))
for s in steps[:-1]:
ratio_3 = [a/b for a,b in zip(condor_count_3[s],condor_count_3[20]) if b <= threshold]
ratio_2 = [a/b for a,b in zip(condor_count_2[s],condor_count_3[20]) if b <= threshold]
print np.mean(ratio_2),np.median(ratio_2)
print np.mean(ratio_3),np.median(ratio_3)
print "==="
| apache-2.0 |
sumspr/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
lemieuxl/pyplink | pyplink/tests/test_pyplink.py | 1 | 45684 | # This file is part of pyplink.
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Louis-Philippe Lemieux Perreault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import os
import sys
import stat
import random
import shutil
import zipfile
import platform
import unittest
from tempfile import mkdtemp
from io import UnsupportedOperation
from distutils.spawn import find_executable
from subprocess import check_call, PIPE, CalledProcessError
try:
from itertools import zip_longest as zip
except ImportError:
from itertools import izip_longest as zip
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
try:
from unittest import mock
except ImportError:
import mock
from pkg_resources import resource_filename
import numpy as np
import pandas as pd
from six.moves import range
from .. import pyplink
def get_plink(tmp_dir):
"""Gets the Plink binary, if required."""
# Checking if Plink is in the path
plink_path = "plink"
if platform.system() == "Windows":
plink_path += ".exe"
if find_executable(plink_path) is None:
print("Downloading Plink", file=sys.stderr)
# The url for each platform
url = ("http://statgen.org/wp-content/uploads/Softwares/"
"plink-1.0.7/{filename}")
# Getting the name of the file
filename = ""
if platform.system() == "Windows":
filename = "plink-1.07-dos.zip"
elif platform.system() == "Darwin":
filename = "plink-1.07-mac-intel.zip"
elif platform.system() == "Linux":
if platform.architecture()[0].startswith("32"):
filename = "plink-1.07-i686.zip"
elif platform.architecture()[0].startswith("64"):
filename = "plink-1.07-x86_64.zip"
else:
return None, "System not compatible for Plink"
else:
return None, "System not compatible for Plink"
# Downloading Plink
zip_path = os.path.join(tmp_dir, filename)
try:
urlretrieve(
url.format(filename=filename),
zip_path,
)
except:
return None, "Plink's URL is not available"
# Unzipping Plink
with zipfile.ZipFile(zip_path, "r") as z:
z.extractall(tmp_dir)
plink_path = os.path.join(tmp_dir, os.path.splitext(filename)[0],
plink_path)
if not os.path.isfile(plink_path):
return None, "Cannot use Plink"
# Making the script executable
if platform.system() in {"Darwin", "Linux"}:
os.chmod(plink_path, stat.S_IRWXU)
# Testing Plink works
try:
check_call([
plink_path,
"--noweb",
"--help",
"--out", os.path.join(tmp_dir, "execution_test")
], stdout=PIPE, stderr=PIPE)
except CalledProcessError:
return None, "Plink cannot be properly used"
except IOError:
return None, "Plink was not properly installed"
return plink_path, "OK"
class TestPyPlink(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Creating a temporary directory
cls.tmp_dir = mkdtemp(prefix="pyplink_test_")
# Getting the BED/BIM/FAM files
cls.bed = resource_filename(
__name__,
os.path.join("data", "test_data.bed"),
)
cls.bim = resource_filename(
__name__,
os.path.join("data", "test_data.bim"),
)
cls.fam = resource_filename(
__name__,
os.path.join("data", "test_data.fam"),
)
# Getting the prefix of the files
cls.prefix = os.path.splitext(cls.bed)[0]
# The list of markers
cls.markers = ["rs10399749", "rs2949420", "rs2949421", "rs2691310",
"rs4030303", "rs4030300", "rs3855952", "rs940550",
"rs13328714", "rs11490937"]
# The genotypes
cls.genotypes = [[0, 0, 1], [0, 1, 0], [-1, -1, -1], [-1, -1, 1],
[0, 0, 0], [0, 0, 0], [0, 1, 2], [0, 0, 0], [1, 0, 0],
[0, 1, 0]]
cls.acgt_genotypes = [["CC", "CC", "GC"], ["TT", "CT", "TT"],
["00", "00", "00"], ["00", "00", "AT"],
["GG", "GG", "GG"], ["CC", "CC", "CC"],
["AA", "GA", "GG"], ["TT", "TT", "TT"],
["GC", "CC", "CC"], ["GG", "AG", "GG"]]
# Getting Plink
cls.plink_path, cls.plink_message = get_plink(cls.tmp_dir)
def setUp(self):
# Reading the plink binary file
self.pedfile = pyplink.PyPlink(self.prefix)
@classmethod
def tearDownClass(cls):
# Cleaning the temporary directory
shutil.rmtree(cls.tmp_dir)
def tearDown(self):
# Closing the PyPlink object
self.pedfile.close()
def test_pyplink_object_integrity(self):
"""Checks the integrity of the PyPlink object."""
# Checking the name of the BED file
self.assertTrue(hasattr(self.pedfile, "bed_filename"))
self.assertEqual(self.bed, self.pedfile.bed_filename)
# Checking the name of the BIM file
self.assertTrue(hasattr(self.pedfile, "bim_filename"))
self.assertEqual(self.bim, self.pedfile.bim_filename)
# Checking the BIM object
self.assertTrue(hasattr(self.pedfile, "_bim"))
self.assertTrue(isinstance(self.pedfile._bim, pd.DataFrame))
# Checking the name of the FAM file
self.assertTrue(hasattr(self.pedfile, "fam_filename"))
self.assertEqual(self.fam, self.pedfile.fam_filename)
# Checking the FAM object
self.assertTrue(hasattr(self.pedfile, "_fam"))
self.assertTrue(isinstance(self.pedfile._fam, pd.DataFrame))
def test_pyplink_object_error(self):
"""Checks what happens when we play with the PyPlink object."""
# Changing the BIM to None
ori = self.pedfile._bim
self.pedfile._bim = None
with self.assertRaises(RuntimeError) as cm:
self.pedfile._read_bed()
self.assertEqual("no BIM or FAM file were read", str(cm.exception))
self.pedfile._bim = ori
# Changing the FAM to None
ori = self.pedfile._fam
self.pedfile._fam = None
with self.assertRaises(RuntimeError) as cm:
self.pedfile._read_bed()
self.assertEqual("no BIM or FAM file were read", str(cm.exception))
self.pedfile._fam = ori
def test_pyplink_bad_bed(self):
"""Checks what happens when we read a bad BED file."""
# The new file prefix
new_prefix = os.path.join(self.tmp_dir, "bad_data")
# Copying the FAM file
new_fam = new_prefix + ".fam"
with open(new_fam, "w") as o_file, open(self.fam, "r") as i_file:
o_file.write(i_file.read())
# Copying the BIM file
new_bim = new_prefix + ".bim"
with open(new_bim, "w") as o_file, open(self.bim, "r") as i_file:
o_file.write(i_file.read())
# Creating a new BED file with invalid number of bytes
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([108, 27, 1, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual("invalid number of entries: corrupted BED?",
str(cm.exception))
# Creating a new BED file with invalid first byte
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([107, 27, 1, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual("not a valid BED file: {}".format(new_bed),
str(cm.exception))
# Creating a new BED file with invalid second byte
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([108, 28, 1, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual("not a valid BED file: {}".format(new_bed),
str(cm.exception))
# Creating a new BED file not in SNP-major format
new_bed = new_prefix + ".bed"
with open(new_bed, "wb") as o_file:
o_file.write(bytearray([108, 27, 0, 1, 2, 3, 4]))
# This should raise an exception
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(new_prefix)
self.assertEqual(
"not in SNP-major format (please recode): {}".format(new_bed),
str(cm.exception),
)
def test_missing_files(self):
"""Checks that an exception is raised when an input file is missing."""
# Creating dummy BED/BIM/FAM files
prefix = os.path.join(self.tmp_dir, "test_missing")
for extension in (".bed", ".bim", ".fam"):
with open(prefix + extension, "w"):
pass
# Removing the files (one by one) and checking the exception is raised
for extension in (".bed", ".bim", ".fam"):
os.remove(prefix + extension)
with self.assertRaises(IOError) as cm:
pyplink.PyPlink(prefix)
self.assertEqual("No such file: '{}'".format(prefix + extension),
str(cm.exception))
with open(prefix + extension, "w"):
pass
def test_get_nb_markers(self):
"""Tests that the correct number of markers is returned."""
self.assertEqual(self.pedfile.get_nb_markers(), 10)
def test_get_nb_markers_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_nb_markers()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_nb_samples(self):
"""Tests that the correct number of samples is returned."""
self.assertEqual(self.pedfile.get_nb_samples(), 3)
def test_get_nb_samples_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_nb_samples()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_bim(self):
"""Tests the 'get_bim' function."""
# The original BIM file (with the 'i' column)
ori_bim = self.pedfile._bim
# The expected values
chromosomes = [1, 2, 3, 4, 4, 5, 6, 6, 6, 8]
positions = [45162, 45257, 45413, 46844, 72434, 72515, 77689, 78032,
81468, 222077]
cms = [0, 1, 1, 2, 2, 3, 4, 4, 5, 6]
a1s = ["G", "C", "0", "A", "0", "0", "G", "0", "G", "A"]
a2s = ["C", "T", "0", "T", "G", "C", "A", "T", "C", "G"]
# Getting the BIM file
bim = self.pedfile.get_bim()
# Checking the columns
self.assertTrue(
set(bim.columns.values) == {"chrom", "pos", "cm", "a1", "a2"}
)
# Checking the indexes
self.assertTrue(set(bim.index.values) == set(self.markers))
# Checking the values for the markers
zipped = zip(self.markers, chromosomes, positions, cms, a1s, a2s)
for marker, chrom, pos, cm, a1, a2 in zipped:
self.assertEqual(chrom, bim.loc[marker, "chrom"])
self.assertEqual(pos, bim.loc[marker, "pos"])
self.assertEqual(cm, bim.loc[marker, "cm"])
self.assertEqual(a1, bim.loc[marker, "a1"])
self.assertEqual(a2, bim.loc[marker, "a2"])
# Comparing with the original values
comparison = ori_bim.loc[:, ["chrom", "pos", "cm", "a1", "a2"]] == bim
self.assertTrue(comparison.all().all())
# Testing that changing a values in the BIM, doesn't change the value
# in the original BIM
bim.loc["rs4030300", "chrom"] = 2
bim.loc["rs2949420", "cm"] = 0.1
comparison = ori_bim.loc[:, ["chrom", "pos", "cm", "a1", "a2"]] == bim
self.assertFalse(comparison.all().chrom)
self.assertFalse(comparison.all().cm)
self.assertTrue(comparison.all()[["pos", "a1", "a2"]].all())
def test_get_bim_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_bim()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_fam(self):
"""Tests the 'get_fam' function."""
# The original FAM file (with the 'byte' and 'bit' columns)
ori_fam = self.pedfile._fam
# The expected values
fids = ["Sample_1", "Sample_2", "Sample_3"]
iids = ["Sample_1", "Sample_2", "Sample_3"]
fathers = ["0", "0", "Sample_1"]
mothers = ["0", "0", "Sample_2"]
genders = [1, 2, 2]
status = [-9, -9, -9]
# Getting the FAM file
fam = self.pedfile.get_fam()
# Checking the columns
self.assertTrue(
set(fam.columns.values) == {"fid", "iid", "father", "mother",
"gender", "status"}
)
# Checking the values
zipped = zip(fids, iids, fathers, mothers, genders, status)
for i, (fid, iid, father, mother, gender, s) in enumerate(zipped):
self.assertEqual(fid, fam.loc[i, "fid"])
self.assertEqual(iid, fam.loc[i, "iid"])
self.assertEqual(father, fam.loc[i, "father"])
self.assertEqual(mother, fam.loc[i, "mother"])
self.assertEqual(gender, fam.loc[i, "gender"])
self.assertEqual(s, fam.loc[i, "status"])
# Comparing with the original values
comparison = ori_fam.loc[:, ["fid", "iid", "father", "mother",
"gender", "status"]] == fam
self.assertTrue(comparison.all().all())
# Testing that changing a values in the FAM, doesn't change the value
# in the original FAM
fam.loc[2, "father"] = "0"
fam.loc[0, "status"] = 2
comparison = ori_fam.loc[:, ["fid", "iid", "father", "mother",
"gender", "status"]] == fam
self.assertFalse(comparison.all().father)
self.assertFalse(comparison.all().status)
self.assertTrue(
comparison.all()[["fid", "iid", "mother", "gender"]].all()
)
def test_get_fam_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_fam()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_generator(self):
"""Testing the class as a generator."""
# Zipping and checking
zipped = zip(
[i for i in zip(self.markers, self.genotypes)],
self.pedfile,
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# The generator should be empty
remaining = [(marker, geno) for marker, geno in self.pedfile]
self.assertEqual(0, len(remaining))
def test_generator_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p)
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_next(self):
"""Tests that an exception is raised when calling next in w mode."""
marker, genotypes = self.pedfile.next()
# Comparing
self.assertEqual(self.markers[0], marker)
np.testing.assert_array_equal(self.genotypes[0], genotypes)
def test_next_w_mode(self):
"""Tests that an exception is raised when calling next in w mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.next()
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_seek(self):
"""Testing the seeking (for the generator)."""
for marker, geno in self.pedfile:
pass
# The generator should be empty
remaining = [(marker, geno) for marker, geno in self.pedfile]
self.assertEqual(0, len(remaining))
# Seeking at the second position
zipped = zip(
[i for i in zip(self.markers[1:], self.genotypes[1:])],
self.pedfile,
)
self.pedfile.seek(1)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Seeking at the fourth position
zipped = zip(
[i for i in zip(self.markers[3:], self.genotypes[3:])],
self.pedfile,
)
self.pedfile.seek(3)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Seeking at the tenth position
zipped = zip(
[i for i in zip(self.markers[9:], self.genotypes[9:])],
self.pedfile,
)
self.pedfile.seek(9)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Seeking at an invalid position
with self.assertRaises(ValueError) as cm:
self.pedfile.seek(-1)
self.assertEqual("invalid position in BED: -1", str(cm.exception))
# Seeking at an invalid position
with self.assertRaises(ValueError) as cm:
self.pedfile.seek(100)
self.assertEqual("invalid position in BED: 100", str(cm.exception))
# Seeking at an invalid position
with self.assertRaises(ValueError) as cm:
self.pedfile.seek(10)
self.assertEqual("invalid position in BED: 10", str(cm.exception))
def test_seek_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.seek(100)
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_geno(self):
"""Tests the 'iter_geno' function."""
zipped = zip(
[i for i in zip(self.markers, self.genotypes)],
self.pedfile.iter_geno(),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
def test_iter_geno_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_geno())
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_acgt_geno(self):
"""Tests the 'iter_acgt_geno" function."""
zipped = zip(
[i for i in zip(self.markers, self.acgt_genotypes)],
self.pedfile.iter_acgt_geno(),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
def test_iter_acgt_geno_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_acgt_geno())
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_geno_marker(self):
"""Tests the 'iter_geno_marker' function."""
# Getting a subset of indexes
indexes = random.sample(range(len(self.markers)), 4)
# Getting the markers and genotypes
markers = [self.markers[i] for i in indexes]
genotypes = [self.genotypes[i] for i in indexes]
# Zipping and comparing
zipped = zip(
[i for i in zip(markers, genotypes)],
self.pedfile.iter_geno_marker(markers),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Testing a single marker
index = random.randint(0, len(self.markers) - 1)
e_marker = self.markers[index]
e_geno = self.genotypes[index]
for marker, geno in self.pedfile.iter_geno_marker(e_marker):
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Adding a marker that doesn't exist
markers.extend(["unknown_1", "unknown_2"])
with self.assertRaises(ValueError) as cm:
[i for i in self.pedfile.iter_geno_marker(markers)]
self.assertEqual("unknown_1: marker not in BIM", str(cm.exception))
def test_iter_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_geno_marker(["M1", "M2"]))
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_iter_acgt_geno_marker(self):
"""Tests the 'iter_acgt_geno_marker' function."""
# Getting a subset of indexes
indexes = random.sample(range(len(self.markers)), 4)
# Getting the markers and genotypes
markers = [self.markers[i] for i in indexes]
genotypes = [self.acgt_genotypes[i] for i in indexes]
# Zipping and comparing
zipped = zip(
[i for i in zip(markers, genotypes)],
self.pedfile.iter_acgt_geno_marker(markers),
)
for (e_marker, e_geno), (marker, geno) in zipped:
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Testing a single marker
index = random.randint(0, len(self.markers) - 1)
e_marker = self.markers[index]
e_geno = self.acgt_genotypes[index]
for marker, geno in self.pedfile.iter_acgt_geno_marker(e_marker):
self.assertEqual(e_marker, marker)
np.testing.assert_array_equal(e_geno, geno)
# Adding a marker that doesn't exist
markers.extend(["unknown_3", "unknown_4"])
with self.assertRaises(ValueError) as cm:
[i for i in self.pedfile.iter_acgt_geno_marker(markers)]
self.assertEqual("unknown_3: marker not in BIM", str(cm.exception))
def test_iter_acgt_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
marker, genotypes = next(p.iter_acgt_geno_marker(["M1", "M2"]))
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_repr_r_mode(self):
"""Tests the object representation of the string (r mode)."""
# Counting the number of samples
nb_samples = None
with open(self.fam, "r") as i_file:
nb_samples = len(i_file.read().splitlines())
# Counting the number of markers
nb_markers = None
with open(self.bim, "r") as i_file:
nb_markers = len(i_file.read().splitlines())
# Creating the expected string representation
e_repr = "PyPlink({:,d} samples; {:,d} markers)".format(nb_samples,
nb_markers)
# Getting the observed string representation
o_repr = str(self.pedfile)
# Comparing
self.assertEqual(e_repr, o_repr)
def test_repr_w_mode(self):
"""Tests the object representation of the string (w mode)."""
# The expected representation
e_repr = 'PyPlink(mode="w")'
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_repr")
with pyplink.PyPlink(prefix, "w") as pedfile:
# Comparing the expected with the observed representation
o_repr = str(pedfile)
self.assertEqual(e_repr, o_repr)
def test_get_geno_marker(self):
"""Tests the 'get_geno_marker' function."""
# Getting a random marker to test
i = random.choice(range(len(self.markers)))
marker = self.markers[i]
e_geno = self.genotypes[i]
# Getting the genotype
o_geno = self.pedfile.get_geno_marker(marker)
np.testing.assert_array_equal(o_geno, e_geno)
# Asking for an unknown marker should raise an ValueError
with self.assertRaises(ValueError) as cm:
self.pedfile.get_geno_marker("dummy_marker")
self.assertEqual(
"dummy_marker: marker not in BIM",
str(cm.exception),
)
def test_get_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_geno_marker("M1")
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_iter_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
iter(p)
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_acgt_geno_marker(self):
"""Tests the 'get_acgt_geno_marker' function."""
# Getting a random marker to test
i = random.choice(range(len(self.markers)))
marker = self.markers[i]
e_geno = self.acgt_genotypes[i]
# Getting the genotype
o_geno = self.pedfile.get_acgt_geno_marker(marker)
np.testing.assert_array_equal(o_geno, e_geno)
# Asking for an unknown marker should raise an ValueError
with self.assertRaises(ValueError) as cm:
self.pedfile.get_acgt_geno_marker("dummy_marker")
self.assertEqual("dummy_marker: marker not in BIM", str(cm.exception))
def test_get_acgt_geno_marker_w_mode(self):
"""Tests that an exception is raised if in write mode."""
with self.assertRaises(UnsupportedOperation) as cm:
# Creating the dummy PyPlink object
prefix = os.path.join(self.tmp_dir, "test_error")
with pyplink.PyPlink(prefix, "w") as p:
p.get_acgt_geno_marker("M1")
self.assertEqual("not available in 'w' mode", str(cm.exception))
def test_get_context_read_mode(self):
"""Tests the PyPlink object as context manager."""
with pyplink.PyPlink(self.prefix) as genotypes:
self.assertEqual(3, len(genotypes.get_fam().head(n=3)))
def test_invalid_mode(self):
"""Tests invalid mode when PyPlink as context manager."""
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(self.prefix, "u")
self.assertEqual("invalid mode: 'u'", str(cm.exception))
def test_write_binary(self):
"""Tests writing a Plink binary file."""
# The expected genotypes
expected_genotypes = [
np.array([0, 0, 0, 1, 0, 1, 2], dtype=int),
np.array([0, 0, 0, 0, -1, 0, 1], dtype=int),
np.array([0, -1, -1, 2, 0, 0, 0], dtype=int),
]
# The prefix
test_prefix = os.path.join(self.tmp_dir, "test_write")
# Writing the binary file
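        # (In the default SNP-major layout, each write_genotypes() call below
        # appends the genotypes of one marker across all seven samples.)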
with pyplink.PyPlink(test_prefix, "w") as pedfile:
for genotypes in expected_genotypes:
pedfile.write_genotypes(genotypes)
# Checking the file exists
self.assertTrue(os.path.isfile(test_prefix + ".bed"))
# Writing the FAM file
with open(test_prefix + ".fam", "w") as o_file:
for i in range(7):
print("f{}".format(i+1), "s{}".format(i+1), "0", "0",
random.choice((1, 2)), "-9", sep=" ", file=o_file)
# Writing the BIM file
with open(test_prefix + ".bim", "w") as o_file:
for i in range(3):
print(i+1, "m{}".format(i+1), "0", i+1, "T", "A", sep="\t",
file=o_file)
# Reading the written binary file
with pyplink.PyPlink(test_prefix) as pedfile:
for i, (marker, genotypes) in enumerate(pedfile):
self.assertEqual("m{}".format(i+1), marker)
np.testing.assert_array_equal(expected_genotypes[i], genotypes)
def test_write_binary_error(self):
"""Tests writing a binary file, with different number of sample."""
# The expected genotypes
expected_genotypes = [
np.array([0, 0, 0, 1, 0, 1, 2], dtype=int),
np.array([0, 0, 0, 0, -1, 0], dtype=int),
np.array([0, -1, -1, 2, 0, 0, 0], dtype=int),
]
# The prefix
test_prefix = os.path.join(self.tmp_dir, "test_write")
# Writing the binary file
with self.assertRaises(ValueError) as cm:
with pyplink.PyPlink(test_prefix, "w") as pedfile:
pedfile.write_genotypes(expected_genotypes[0]) # 7 genotypes
pedfile.write_genotypes(expected_genotypes[1]) # 6 genotypes
self.assertEqual("7 samples expected, got 6", str(cm.exception))
def test_grouper_padding(self):
"""Tests the _grouper function (when padding is required)."""
expected_chunks = [
(0, 1, 2),
(3, 4, 5),
(6, 7, 8),
(9, 0, 0),
]
observed_chunks = pyplink.PyPlink._grouper(range(10), 3)
for expected, observed in zip(expected_chunks, observed_chunks):
self.assertEqual(expected, observed)
def test_grouper_no_padding(self):
"""Tests the _grouper function (when padding is not required)."""
expected_chunks = [
(0, 1, 2, 3, 4),
(5, 6, 7, 8, 9),
]
observed_chunks = pyplink.PyPlink._grouper(range(10), 5)
for expected, observed in zip(expected_chunks, observed_chunks):
self.assertEqual(expected, observed)
@unittest.skipIf(platform.system() not in {"Darwin", "Linux", "Windows"},
"Plink not available for {}".format(platform.system()))
def test_with_plink(self):
"""Tests to read a binary file using Plink."""
# Checking if we need to skip
if self.plink_path is None:
self.skipTest(self.plink_message)
# Creating the BED file
all_genotypes = [
[0, 1, 0, 0, -1, 0, 1, 0, 0, 2],
[2, 1, 2, 2, 2, 2, 2, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
]
prefix = os.path.join(self.tmp_dir, "test_output")
with pyplink.PyPlink(prefix, "w") as pedfile:
for genotypes in all_genotypes:
pedfile.write_genotypes(genotypes)
# Creating the FAM file
fam_content = [
["F0", "S0", "0", "0", "1", "-9"],
["F1", "S1", "0", "0", "1", "-9"],
["F2", "S2", "0", "0", "2", "-9"],
["F3", "S3", "0", "0", "1", "-9"],
["F4", "S4", "0", "0", "1", "-9"],
["F5", "S5", "0", "0", "2", "-9"],
["F6", "S6", "0", "0", "1", "-9"],
["F7", "S7", "0", "0", "0", "-9"],
["F8", "S8", "0", "0", "1", "-9"],
["F9", "S9", "0", "0", "2", "-9"],
]
with open(prefix + ".fam", "w") as o_file:
for sample in fam_content:
print(*sample, sep=" ", file=o_file)
# Creating the BIM file
bim_content = [
["1", "M0", "0", "123", "A", "G"],
["1", "M1", "0", "124", "C", "T"],
["2", "M2", "0", "117", "G", "C"],
]
with open(prefix + ".bim", "w") as o_file:
for marker in bim_content:
print(*marker, sep="\t", file=o_file)
# Creating a transposed pedfile using Plink
out_prefix = prefix + "_transposed"
try:
check_call([
self.plink_path,
"--noweb",
"--bfile", prefix,
"--recode", "--transpose", "--tab",
"--out", out_prefix,
], stdout=PIPE, stderr=PIPE)
except CalledProcessError:
self.fail("Plink could not recode file")
        # Checking the two files exist
self.assertTrue(os.path.isfile(out_prefix + ".tped"))
self.assertTrue(os.path.isfile(out_prefix + ".tfam"))
# Checking the content of the TFAM file
expected = "\n".join("\t".join(sample) for sample in fam_content)
with open(out_prefix + ".tfam", "r") as i_file:
self.assertEqual(expected + "\n", i_file.read())
# Checking the content of the TPED file
with open(out_prefix + ".tped", "r") as i_file:
# Checking the first marker
marker_1 = i_file.readline().rstrip("\r\n").split("\t")
self.assertEqual(["1", "M0", "0", "123"], marker_1[:4])
self.assertEqual(["G G", "A G", "G G", "G G", "0 0", "G G", "A G",
"G G", "G G", "A A"],
marker_1[4:])
# Checking the second marker
marker_2 = i_file.readline().rstrip("\r\n").split("\t")
self.assertEqual(["1", "M1", "0", "124"], marker_2[:4])
self.assertEqual(["C C", "T C", "C C", "C C", "C C", "C C", "C C",
"T C", "T T", "T C"],
marker_2[4:])
# Checking the third marker
marker_3 = i_file.readline().rstrip("\r\n").split("\t")
self.assertEqual(["2", "M2", "0", "117"], marker_3[:4])
self.assertEqual(["C C", "C C", "C C", "C C", "C C", "G C", "C C",
"C C", "C C", "C C"],
marker_3[4:])
# Checking this is the end of the file
self.assertEqual("", i_file.readline())
@unittest.skipIf(platform.system() not in {"Darwin", "Linux", "Windows"},
"Plink not available for {}".format(platform.system()))
def test_with_plink_individual_major(self):
"""Tests to read a binary file (INDIVIDUAL-major) using Plink."""
# Checking if we need to skip
if self.plink_path is None:
self.skipTest(self.plink_message)
# The genotypes
all_genotypes = [
[0, 1, 0, 0, -1, 0, 1, 0, 0, 2],
[2, 1, 2, 2, 2, 2, 2, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
]
transposed_genotypes = [
[row[i] for row in all_genotypes]
for i in range(len(all_genotypes[0]))
]
# Creating the BED file (INDIVIDUAL-major)
prefix = os.path.join(self.tmp_dir, "test_output")
with pyplink.PyPlink(prefix, "w", "INDIVIDUAL-major") as pedfile:
for genotypes in transposed_genotypes:
pedfile.write_genotypes(genotypes)
# Creating the FAM file
fam_content = [
["F0", "S0", "0", "0", "1", "-9"],
["F1", "S1", "0", "0", "1", "-9"],
["F2", "S2", "0", "0", "2", "-9"],
["F3", "S3", "0", "0", "1", "-9"],
["F4", "S4", "0", "0", "1", "-9"],
["F5", "S5", "0", "0", "2", "-9"],
["F6", "S6", "0", "0", "1", "-9"],
["F7", "S7", "0", "0", "0", "-9"],
["F8", "S8", "0", "0", "1", "-9"],
["F9", "S9", "0", "0", "2", "-9"],
]
with open(prefix + ".fam", "w") as o_file:
for sample in fam_content:
print(*sample, sep=" ", file=o_file)
# Creating the BIM file
bim_content = [
["1", "M0", "0", "123", "A", "G"],
["1", "M1", "0", "124", "C", "T"],
["2", "M2", "0", "117", "G", "C"],
]
with open(prefix + ".bim", "w") as o_file:
for marker in bim_content:
print(*marker, sep="\t", file=o_file)
# Creating a transposed pedfile using Plink
out_prefix = prefix + "_transposed"
try:
check_call([
self.plink_path,
"--noweb",
"--bfile", prefix,
"--recode", "--transpose", "--tab",
"--out", out_prefix,
], stdout=PIPE, stderr=PIPE)
except CalledProcessError:
self.fail("Plink could not recode file")
        # Checking the two files exist
self.assertTrue(os.path.isfile(out_prefix + ".tped"))
self.assertTrue(os.path.isfile(out_prefix + ".tfam"))
# Checking the content of the TFAM file
expected = "\n".join("\t".join(sample) for sample in fam_content)
with open(out_prefix + ".tfam", "r") as i_file:
self.assertEqual(expected + "\n", i_file.read())
# Checking the content of the TPED file
with open(out_prefix + ".tped", "r") as i_file:
# Checking the first marker
marker_1 = i_file.readline().rstrip("\r\n").split("\t")
self.assertEqual(["1", "M0", "0", "123"], marker_1[:4])
self.assertEqual(["G G", "A G", "G G", "G G", "0 0", "G G", "A G",
"G G", "G G", "A A"],
marker_1[4:])
# Checking the second marker
marker_2 = i_file.readline().rstrip("\r\n").split("\t")
self.assertEqual(["1", "M1", "0", "124"], marker_2[:4])
self.assertEqual(["C C", "T C", "C C", "C C", "C C", "C C", "C C",
"T C", "T T", "T C"],
marker_2[4:])
# Checking the third marker
marker_3 = i_file.readline().rstrip("\r\n").split("\t")
self.assertEqual(["2", "M2", "0", "117"], marker_3[:4])
self.assertEqual(["C C", "C C", "C C", "C C", "C C", "G C", "C C",
"C C", "C C", "C C"],
marker_3[4:])
# Checking this is the end of the file
self.assertEqual("", i_file.readline())
def test_wrong_bed_format(self):
"""Tests opening a BED file with unknown format."""
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(self.prefix, bed_format="UNKNOWN-major")
self.assertEqual(
"invalid bed format: UNKNOWN-major",
str(cm.exception),
)
def test_invalid_bed_format_with_r_mode(self):
"""Tests an invalid BED format with r mode."""
with self.assertRaises(ValueError) as cm:
pyplink.PyPlink(self.prefix, bed_format="INDIVIDUAL-major")
self.assertEqual(
"only SNP-major format is supported with mode 'r'",
str(cm.exception),
)
def test_write_genotypes_in_r_mode(self):
"""Tests to use the 'write_genotypes' function in read mode."""
with self.assertRaises(UnsupportedOperation) as cm:
self.pedfile.write_genotypes([0, 0, 0])
self.assertEqual("not available in 'r' mode", str(cm.exception))
@mock.patch.object(pyplink, "logger")
def test_dup_markers(self, pyplink_logger):
"""Tests when there are duplicated markers."""
# Checking the original one has no duplicates
self.assertEqual(len(self.pedfile.get_duplicated_markers()), 0)
# Copying the BED and the FAM to the temporary directory
new_prefix = os.path.join(self.tmp_dir, "with_dup")
for suffix in (".bed", ".fam"):
shutil.copyfile(self.prefix + suffix, new_prefix + suffix)
# Copying the BIM file to the temporary directory
shutil.copyfile(self.prefix + "_with_dup.bim", new_prefix + ".bim")
# Reading the new files
pedfile = pyplink.PyPlink(new_prefix)
# Checking a warning was called
self.assertTrue(pyplink_logger.warning.called)
# Checking the BIM
chromosomes = [1, 2, 3, 4, 4, 5, 6, 6, 6, 8]
markers = ["rs10399749", "rs2949420:dup1", "rs2949421", "rs2691310",
"rs4030303:dup1", "rs4030303:dup2", "rs4030303:dup3",
"rs940550:dup1", "rs940550:dup2", "rs2949420:dup2"]
positions = [45162, 45257, 45413, 46844, 72434, 72515, 77689, 78032,
81468, 222077]
cms = [0, 1, 1, 2, 2, 3, 4, 4, 5, 6]
a1s = ["G", "C", "0", "A", "0", "0", "G", "0", "G", "A"]
a2s = ["C", "T", "0", "T", "G", "C", "A", "T", "C", "G"]
# Getting the BIM file
bim = pedfile.get_bim()
# Checking the columns
self.assertTrue(
set(bim.columns.values) == {"chrom", "pos", "cm", "a1", "a2"}
)
# Checking the indexes
self.assertTrue(set(bim.index.values) == set(markers))
# Checking the values for the markers
zipped = zip(markers, chromosomes, positions, cms, a1s, a2s)
for marker, chrom, pos, cm, a1, a2 in zipped:
self.assertEqual(chrom, bim.loc[marker, "chrom"])
self.assertEqual(pos, bim.loc[marker, "pos"])
self.assertEqual(cm, bim.loc[marker, "cm"])
self.assertEqual(a1, bim.loc[marker, "a1"])
self.assertEqual(a2, bim.loc[marker, "a2"])
        # Checking the genotypes of each marker (renamed duplicates included)
for i, marker in enumerate(markers):
geno = pedfile.get_geno_marker(marker)
np.testing.assert_array_equal(geno, self.genotypes[i])
# Checking the list of duplicated markers
expected_dup = dict(
rs2949420=["rs2949420:dup1", "rs2949420:dup2"],
rs4030303=["rs4030303:dup1", "rs4030303:dup2", "rs4030303:dup3"],
rs940550=["rs940550:dup1", "rs940550:dup2"],
)
self.assertEqual(expected_dup, pedfile.get_duplicated_markers())
# Closing the file
pedfile.close()
| mit |
fujicoin/electrum-fjc | electrum/plot.py | 1 | 1878 | import datetime
from collections import defaultdict
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as md
from .i18n import _
from .bitcoin import COIN
class NothingToPlotException(Exception):
def __str__(self):
return _("Nothing to plot.")
def plot_history(history):
if len(history) == 0:
raise NothingToPlotException()
hist_in = defaultdict(int)
hist_out = defaultdict(int)
for item in history:
if not item['confirmations']:
continue
if item['timestamp'] is None:
continue
value = item['value'].value/COIN
date = item['date']
datenum = int(md.date2num(datetime.date(date.year, date.month, 1)))
if value > 0:
hist_in[datenum] += value
else:
hist_out[datenum] -= value
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
    plt.xticks(rotation=25)
ax = plt.gca()
plt.ylabel('FJC')
plt.xlabel('Month')
    axarr[0].set_title('Monthly Volume')
    xfmt = md.DateFormatter('%Y-%m')
    ax.xaxis.set_major_formatter(xfmt)
width = 20
r1 = None
r2 = None
dates_values = list(zip(*sorted(hist_in.items())))
if dates_values and len(dates_values) == 2:
dates, values = dates_values
r1 = axarr[0].bar(dates, values, width, label='incoming')
axarr[0].legend(loc='upper left')
dates_values = list(zip(*sorted(hist_out.items())))
if dates_values and len(dates_values) == 2:
dates, values = dates_values
r2 = axarr[1].bar(dates, values, width, color='r', label='outgoing')
axarr[1].legend(loc='upper left')
if r1 is None and r2 is None:
raise NothingToPlotException()
return plt
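# Illustrative usage sketch (the history source below is assumed, not part of
# this module): each history item must provide 'confirmations', 'timestamp',
# 'date' and a 'value' amount object, e.g.
#   plt = plot_history(wallet_history)
#   plt.show()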
| mit |
with-git/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 29 | 5677 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
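  # word_vectors now has shape [batch_size, MAX_DOCUMENT_LENGTH,
  # EMBEDDING_SIZE, 1]; the trailing singleton channel lets conv2d treat the
  # embedded sequence as a one-channel image.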
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
        # Add a ReLU for non-linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
m2dsupsdlclass/lectures-labs | slides/08_expressivity_optimization_generalization/images/generalization_grid.py | 1 | 5944 | import json
import os
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network.multilayer_perceptron import ACTIVATIONS
from sklearn.model_selection import ParameterGrid
from sklearn.utils import shuffle
from sklearn.utils.extmath import safe_sparse_dot
import joblib
model_filename = 'models.log'
evaluations_filename = 'evaluations.log'
def logits(m, X):
    """Forward pass returning the pre-activation outputs of the last layer."""
    sigma = ACTIVATIONS[m.activation]
    a = X
    for i in range(m.n_layers_ - 1):
        a = safe_sparse_dot(a, m.coefs_[i])
        a += m.intercepts_[i]
        # Apply the hidden activation on every layer except the output one.
        if (i + 1) != (m.n_layers_ - 1):
            a = sigma(a)
    return a
def lipschitz(m):
return np.prod([max(np.linalg.svd(w)[1]) for w in m.coefs_])
def margins(m, X, y):
preds = logits(m, X).ravel()
# correct_mask = (preds >= 0) == y
# return np.abs(preds * correct_mask)
return np.abs(preds)
def normalized_margins(m, X, y):
return margins(m, X, y) / lipschitz(m)
def bartlett_complexity_mean(m, X, y):
return 1 / normalized_margins(m, X, y).mean()
def bartlett_complexity_median(m, X, y):
median = np.median(normalized_margins(m, X, y))
if median == 0:
return 0
return 1 / median
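# Note on the quantities above (illustrative summary): with 1-Lipschitz
# activations such as ReLU, the product of the spectral norms of the weight
# matrices upper-bounds the network's Lipschitz constant, so
# margin / lipschitz is a scale-invariant "normalized" margin and its inverse
# (mean or median over the training set) serves as a rough Bartlett-style
# spectral complexity proxy.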
def make_noisy_problem(n_samples_train=30, label_noise_rate=0.1, input_noise=0.15,
n_samples_test=3000, seed=0):
    rng = np.random.RandomState(seed)
scaler = StandardScaler()
X_train, y_train = make_moons(n_samples=n_samples_train, shuffle=True,
noise=input_noise, random_state=rng)
X_test, y_test = make_moons(n_samples=n_samples_test, shuffle=True,
noise=input_noise, random_state=rng)
if label_noise_rate > 0:
rnd_levels = rng.uniform(low=0., high=1., size=n_samples_train)
noise_mask = rnd_levels <= label_noise_rate
y_train[noise_mask] = rng.randint(low=0, high=2, size=noise_mask.sum())
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
return (X_train, y_train), (X_test, y_test)
hidden_layer_sizes_range = [
[16], [64], [256], [512], [1024],
[16] * 2, [64] * 2, [256] * 2, [512] * 2,
[256] * 3,
[256] * 4,
[256] * 5,
]
param_grid = [
{
'solver': ['sgd', 'adam'],
'hidden_layer_sizes': hidden_layer_sizes_range,
'activation': ['relu'],
'random_state': [0],
'learning_rate_init': [0.1, 0.01],
'learning_rate': ['constant', 'adaptive'],
'max_iter': [5000],
},
{
'solver': ['lbfgs'],
'hidden_layer_sizes': hidden_layer_sizes_range,
'activation': ['relu'],
'random_state': [0],
},
]
if __name__ == '__main__':
model_params = list(ParameterGrid(param_grid))
with open(model_filename, 'w') as f:
for params in model_params:
model_id = joblib.hash(params)
model_record = params.copy()
model_record['model_id'] = model_id
model_record['depth'] = len(params['hidden_layer_sizes'])
model_record['width'] = max(params['hidden_layer_sizes'])
f.write(json.dumps(model_record) + '\n')
f.flush()
model_params = shuffle(model_params, random_state=0)
with open(evaluations_filename, 'w') as f:
for n_samples_train in [30]:
for label_noise_rate in np.linspace(0, 1, 11):
print(f'\nn_samples: {n_samples_train}, label noise: {label_noise_rate:0.1f}')
for data_seed in [0, 1]:
(X_train, y_train), (X_test, y_test) = make_noisy_problem(
n_samples_train, label_noise_rate, seed=data_seed)
for params in model_params:
model_id = joblib.hash(params)
m = MLPClassifier(**params).fit(X_train, y_train)
train_acc = m.score(X_train, y_train)
test_acc = m.score(X_test, y_test)
excess_risk = max(train_acc - test_acc, 0)
n_params = sum([np.product(w.shape) for w in m.coefs_])
n_params += sum([np.product(b.shape) for b in m.intercepts_])
evaluation_record = {
'model_id': model_id,
'n_samples_train': n_samples_train,
'label_noise_rate': label_noise_rate,
'train_acc': train_acc,
'test_acc': test_acc,
'excess_risk': excess_risk,
'lipschitz': lipschitz(m),
'mean_margins': margins(m, X_train, y_train).mean(),
'median_margins': np.median(margins(m, X_train, y_train)),
'bartlett_complexity_mean': bartlett_complexity_mean(m, X_train, y_train),
'bartlett_complexity_median': bartlett_complexity_median(m, X_train, y_train),
'mean_margins_test': margins(m, X_test, y_test).mean(),
'median_margins_test': np.median(margins(m, X_test, y_test)),
'bartlett_complexity_mean_test': bartlett_complexity_mean(m, X_test, y_test),
'bartlett_complexity_median_test': bartlett_complexity_median(m, X_test, y_test),
'n_params': int(n_params),
}
f.write(json.dumps(evaluation_record) + '\n')
f.flush()
print('.', end='', flush=True) | mit |
teoliphant/scipy | scipy/cluster/hierarchy.py | 2 | 94428 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [Sta07] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [Mti07] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [Gow69] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [War63] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [Joh66] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [Sne62] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [Bat95] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [Sok58] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [Ede79] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [Jai88] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [Fis36] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import types
import warnings
import numpy as np
import _hierarchy_wrap
import scipy.spatial.distance as distance
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) / 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage. See ``linkage`` for more
information on the return structure and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage. See ``linkage`` for more
information on the return structure and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance
matrix. See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
    An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
        cluster :math:`v`. This is also known as the Farthest Point
        Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
        and :math:`v`, respectively. This is also called the UPGMA
        algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
      * method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
give the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
+ \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
      implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
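    Examples
    --------
    A minimal sketch (coordinates invented for illustration): four points
    forming two well-separated pairs.
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage
    >>> from scipy.spatial.distance import pdist
    >>> X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    >>> Z = linkage(pdist(X), method='single')
    >>> Z.shape          # n - 1 = 3 merge rows, 4 columns each
    (3, 4)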
"""
if not isinstance(method, str):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods.keys():
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
_hierarchy_wrap.linkage_wrap(y, Z, int(d), \
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
m = s[1]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods.keys():
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy_wrap.linkage_wrap(dm, Z, n, \
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods.keys():
if metric != 'euclidean':
                raise ValueError(('Method %s requires the distance metric to '
                                 'be euclidean') % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy_wrap.linkage_euclid_wrap(dm, Z, X, m, n,
int(_cpy_euclid_methods[method]))
return Z
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree: for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
c : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = np.zeros((2 * n,), dtype=bool)
rvisited = np.zeros((2 * n,), dtype=bool)
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if not lvisited[ndid]:
curNode[k + 1] = nd.left
lvisited[ndid] = True
k = k + 1
elif not rvisited[ndid]:
curNode[k + 1] = nd.right
rvisited[ndid] = True
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object. The reference r to the
root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When ``False``, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    tree : ClusterNode
        The reference to the root ClusterNode object if ``rd`` is False;
        otherwise, the tuple ``(r, d)`` described above.
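    Examples
    --------
    A minimal sketch (linkage values invented for illustration):
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import to_tree
    >>> Z = np.array([[0., 1., 1.0, 2.],
    ...               [2., 3., 1.0, 2.],
    ...               [4., 5., 3.0, 4.]])
    >>> root = to_tree(Z)
    >>> root.get_count()
    4
    >>> root.pre_order()
    [0, 1, 2, 3]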
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != np.bool:
X = np.bool_(X)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = np.double(X)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix ``Z``
of a set of :math:`n` observations in :math:`m`
dimensions. ``Y`` is the condensed distance matrix from which
``Z`` was generated.
Returns
-------
res : tuple
A tuple (c, {d}):
- c : ndarray
            The cophenetic correlation coefficient (if ``Y`` is passed).
- d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
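    Examples
    --------
    A minimal sketch (coordinates invented for illustration):
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, cophenet
    >>> from scipy.spatial.distance import pdist
    >>> X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    >>> Y = pdist(X)
    >>> Z = linkage(Y, 'single')
    >>> c, d = cophenet(Z, Y)
    >>> d.shape          # condensed form: n * (n - 1) / 2 entries
    (6,)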
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1) / 2,), dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy_wrap.cophenetic_distances_wrap(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
Ys = Y.shape
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
#print Yy.shape, Zz.shape
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
#print c, numerator.sum()
return (c, zz)
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
    Z : ndarray
        The :math:`(n-1)` by 4 matrix encoding the linkage
        (hierarchical clustering). See ``linkage`` documentation
        for more information on its form.
    d : int, optional
        The number of links up to ``d`` levels below each
        non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math::
            \\frac{\\mathtt{Z[i,2]} - \\mathtt{R[i,0]}}{\\mathtt{R[i,1]}}.
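    Examples
    --------
    A minimal sketch (linkage values invented for illustration):
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import inconsistent
    >>> Z = np.array([[0., 1., 1.0, 2.],
    ...               [2., 3., 1.0, 2.],
    ...               [4., 5., 3.0, 4.]])
    >>> R = inconsistent(Z)
    >>> R.shape          # one row of statistics per non-singleton cluster
    (3, 4)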
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# if d == 0:
# d = 1
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy_wrap.inconsistent_wrap(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module. The conversion does
two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
    * a fourth column Z[:,3] is added where Z[i,3] represents the
      number of original observations (leaves) in the non-singleton
      cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
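    Examples
    --------
    A minimal sketch (MATLAB-style values invented for illustration):
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import from_mlab_linkage
    >>> ZM = np.array([[1., 2., 1.0],
    ...                [3., 4., 1.0],
    ...                [5., 6., 3.0]])
    >>> Z = from_mlab_linkage(ZM)
    >>> Z.shape          # a fourth column with cluster sizes is appended
    (3, 4)
    >>> Z[:, :2].min()   # indices now start at 0
    0.0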
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy_wrap.calculate_cluster_sizes_wrap(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
ZM : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns ``True`` if the linkage passed is monotonic. The linkage
is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
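    Examples
    --------
    A minimal sketch (linkage values invented for illustration):
    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import is_monotonic
    >>> Z = np.array([[0., 1., 1.0, 2.],
    ...               [2., 3., 1.0, 2.],
    ...               [4., 5., 3.0, 4.]])
    >>> is_monotonic(Z)
    True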
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # Each successive merge distance must be no smaller than the previous one.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When ``True``, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When ``True``, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError(('Variable \'%s\' passed as inconsistency '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable passed as inconsistency matrix '
'is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError(('Inconsistency matrix \'%s\' must contain '
'doubles (double).') % name)
else:
raise TypeError('Inconsistency matrix must contain doubles '
'(double).')
if len(R.shape) != 2:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have '
'shape=2 (i.e. be two-dimensional).') % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 '
'(i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have 4 '
'columns.') % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have at '
'least one row.') % name)
else:
raise ValueError('Inconsistency matrix must have at least '
'one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height means.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height standard '
'deviations.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link counts.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link counts.')
except Exception, e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When ``True``, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When ``True``, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a linkage is not a valid '
'array.') % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.'
% name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError(('Linkage matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Linkage matrix must have shape=2 '
'(i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.'
% name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or
(Z[:, 1] < 0).any()):
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'indices.') % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'distances.') % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.'
% name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
                raise ValueError(('Linkage \'%s\' uses non-singleton cluster '
                                  'before it is formed.') % name)
else:
raise ValueError("Linkage uses non-singleton cluster before "
"it's formed.")
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError(('Linkage \'%s\' uses the same cluster more '
'than once.') % name)
else:
raise ValueError('Linkage uses the same cluster more than '
'once.')
# if _check_hierarchy_not_all_clusters_used(Z):
# if name:
# raise ValueError('Linkage \'%s\' does not use all clusters.'
# % name)
# else:
# raise ValueError('Linkage does not use all clusters.')
    except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
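    Examples
    --------
    Illustrative sketch (added for exposition); a linkage over six made-up
    observations reports six original observations:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> num_obs_linkage(Z)
    6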
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
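    Examples
    --------
    Illustrative sketch (added for exposition); ``Y`` is the condensed
    distance matrix the linkage ``Z`` was computed from, so the two
    correspond:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Y = distance.pdist(X)
    >>> Z = linkage(Y, method='single')
    >>> correspond(Z, Y)
    True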
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
'inconsistent':
If a cluster node and all its
descendants have an inconsistent value less than or equal
to ``t`` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
'distance':
Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than ``t``.
'maxclust':
Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than ``t`` flat clusters are formed.
'monocrit':
Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
``MR = maxRstat(Z, R, 3)``
``cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)``
'maxclust_monocrit':
Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
``MI = maxinconsts(Z, R)``
``cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)``
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. ``monocrit[i]`` is the
        statistic upon which non-singleton cluster ``i`` is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, ``monocrit[i] >= monocrit[j]``.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
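    Examples
    --------
    A minimal illustrative sketch (added for exposition); the two well
    separated groups of made-up points below yield two flat clusters at a
    cophenetic distance threshold of 2.0:
    >>> import numpy as np
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> T = fcluster(Z, t=2.0, criterion='distance')
    >>> len(np.unique(T))
    2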
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy_wrap.cluster_in_wrap(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy_wrap.cluster_dist_wrap(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy_wrap.cluster_maxclust_dist_wrap(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy_wrap.cluster_monocrit_wrap(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy_wrap.cluster_maxclust_monocrit_wrap(Z, monocrit, T,
int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent', \
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : ndarray
n by m data matrix with n observations in m dimensions.
    t : float
        The cut-off threshold for the cluster function, or the maximum
        number of clusters when ``criterion='maxclust'``.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
T : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
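    Examples
    --------
    A minimal illustrative sketch (added for exposition); the observation
    matrix is made up and falls into two well separated groups:
    >>> import numpy as np
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> T = fclusterdata(X, t=2.0, criterion='distance')
    >>> len(np.unique(T))
    2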
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids (corresponding to observation
vector index) as they appear in the tree from left to right. Z is
a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
L : ndarray
The list of leaf node ids.
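    Examples
    --------
    Illustrative sketch (added for exposition); the returned array has one
    entry per original observation:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> leaves_list(Z).shape
    (6,)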
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy_wrap.prelist_wrap(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= np.inf, size="6"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
axis = matplotlib.pylab.gca()
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
axis.set_ylim([0, dvw])
axis.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
axis.set_xticks([])
axis.set_xticklabels([])
else:
axis.set_xticks(ivticks)
axis.set_xticklabels(ivl)
axis.xaxis.set_ticks_position('bottom')
lbls = axis.get_xticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
else:
matplotlib.pylab.setp(lbls, 'rotation',
float(_get_tick_rotation(len(ivl))))
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
else:
matplotlib.pylab.setp(lbls, 'size',
float(_get_tick_text_size(len(ivl))))
# txt.set_fontsize()
# txt.set_rotation(45)
# Make the tick marks invisible because they cover up the links
for line in axis.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
axis.set_ylim([dvw, 0])
axis.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
axis.set_xticks([])
axis.set_xticklabels([])
else:
axis.set_xticks(ivticks)
axis.set_xticklabels(ivl)
lbls = axis.get_xticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
else:
matplotlib.pylab.setp(lbls, 'rotation',
float(_get_tick_rotation(p)))
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
else:
matplotlib.pylab.setp(lbls, 'size',
float(_get_tick_text_size(p)))
axis.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in axis.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
axis.set_xlim([0, dvw])
axis.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
axis.set_yticks([])
axis.set_yticklabels([])
else:
axis.set_yticks(ivticks)
axis.set_yticklabels(ivl)
lbls = axis.get_yticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
axis.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in axis.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
axis.set_xlim([dvw, 0])
axis.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
axis.set_yticks([])
axis.set_yticklabels([])
else:
axis.set_yticks(ivticks)
axis.set_yticklabels(ivl)
lbls = axis.get_yticklabels()
if leaf_rotation:
matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation)
if leaf_font_size:
matplotlib.pylab.setp(lbls, 'size', leaf_font_size)
axis.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in axis.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(zip(xline, yline))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the non-blue link groupings, i.e. those groupings below the
# color threshold.
for color in colors_used:
if color != 'b':
axis.add_collection(colors_to_collections[color])
# If there is a blue grouping (i.e., links above the color threshold),
# it should go last.
if 'b' in colors_to_collections:
axis.add_collection(colors_to_collections['b'])
if contraction_marks is not None:
#xs=[x for (x, y) in contraction_marks]
#ys=[y for (x, y) in contraction_marks]
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
axis.add_artist(e)
e.set_clip_box(axis.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
axis.add_artist(e)
e.set_clip_box(axis.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
#matplotlib.pylab.plot(xs, ys, 'go', markeredgecolor='k',
# markersize=3)
#matplotlib.pylab.plot(ys, xs, 'go', markeredgecolor='k',
# markersize=3)
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Changes the list of matplotlib color codes to use when coloring
links with the dendrogram color_threshold feature.
Parameters
----------
    palette : list
        A list of matplotlib color codes. The order of the color codes
        is the order in which the colors are cycled through when color
        thresholding in the dendrogram.
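    Examples
    --------
    Illustrative sketch (added for exposition); the palette below is an
    arbitrary choice of matplotlib color codes, and the second call
    restores the module default:
    >>> set_link_color_palette(['m', 'c', 'y', 'k'])
    >>> set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])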
"""
if type(palette) not in (types.ListType, types.TupleType):
raise TypeError("palette must be a list or tuple")
_ptypes = [type(p) == types.StringType for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
* None/'none': no truncation is performed (Default)
      * 'lastp': the last ``p`` non-singleton clusters formed in the
        linkage are the only non-leaf nodes in the linkage; they
        correspond to rows ``Z[n-p-2:end]`` in ``Z``. All other
non-singleton clusters are contracted into leaf nodes.
* 'mlab': This corresponds to MATLAB(TM) behavior. (not
implemented yet)
* 'level'/'mtica': no more than ``p`` levels of the
dendrogram tree are displayed. This corresponds to
Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is ``None`` or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
      * 'top': plots the root at the top, and plots descendent
        links going downwards. (default).
      * 'bottom': plots the root at the bottom, and plots descendent
        links going upwards.
      * 'left': plots the root at the left, and plots descendent
        links going right.
      * 'right': plots the root at the right, and plots descendent
        links going left.
labels : ndarray, optional
By default ``labels`` is ``None`` so the index of the
original observation is used to label the leaf nodes.
Otherwise, this is an :math:`n` -sized list (or tuple). The
``labels[i]`` value is the text to put under the :math:`i` th
leaf node only if it corresponds to an original observation
and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
* False: nothing is done.
* 'ascending'/True: the child with the minimum number of
original objects in its cluster is plotted first.
        * 'descending': the child with the maximum number of
original objects in its cluster is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
* False: nothing is done.
* 'ascending'/True: the child with the minimum distance
between its direct descendents is plotted first.
* 'descending': the child with the maximum distance
between its direct descendents is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be
``True``.
show_leaf_counts : bool, optional
When ``True``, leaf nodes representing :math:`k>1` original
        observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When ``True``, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When ``True``, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
    leaf_rotation : double, optional
        Specifies the angle (in degrees) to rotate the leaf
        labels. When unspecified, the rotation is based on the number of
        nodes in the dendrogram.
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called with
        the cluster index :math:`k < 2n-1` of each leaf. The function
        is expected to return a string with the label for that
        leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
                return '[%d %d %1.2f]' % (id, count, R[id - n, 3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool
When ``True`` the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : lambda/function
When a callable function,
        link_color_func is called with each non-singleton id
corresponding to each U-shaped link it will paint. The
function is expected to return the color to paint the link,
encoded as a matplotlib color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
      * 'icoord': a list of lists ``[I1, I2, ..., Ip]`` where
        ``Ik`` is a list of 4 independent variable coordinates
        corresponding to the line that represents the k'th link
        painted.
      * 'dcoord': a list of lists ``[D1, D2, ..., Dp]`` where
        ``Dk`` is a list of 4 dependent variable coordinates
        corresponding to the line that represents the k'th link
        painted.
* 'ivl': a list of labels corresponding to the leaf nodes.
* 'leaves': for each i, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right
traversal of the leaves, where :math:`j < 2n-1`
and :math:`i < n`. If ``j`` is less than ``n``, the
``i`` th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
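    Examples
    --------
    A minimal illustrative sketch (added for exposition); ``no_plot=True``
    keeps the example independent of matplotlib and only the computed
    data structures are inspected:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> info = dendrogram(Z, no_plot=True)
    >>> len(info['leaves'])
    6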
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (types.IntType, types.FloatType):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(type(color_threshold) == types.StringType and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
props = {'cbt': False, 'cc': 0}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.x
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, Z[i - n, 0], n, contraction_marks)
_append_contraction_marks_sub(Z, iv, Z[i - n, 1], n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, Z[i - n, 0], n, contraction_marks)
_append_contraction_marks_sub(Z, iv, Z[i - n, 1], n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode, \
color_threshold=np.inf, get_leaves=True, \
orientation='top', labels=None, \
count_sort=False, distance_sort=False, \
show_leaf_counts=False, i=-1, iv=0.0, \
ivl=[], n=0, icoord_list=[], dcoord_list=[], \
lvs=None, mhr=False, \
current_color=[], color_list=[], \
currently_below_threshold=[], \
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None):
"""
    Calculates the endpoints of the links as well as the labels for
    the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
    labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
      U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-single cluster,
# it's label is either the empty string or the number of original
# observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = Z[i - n, 0]
ab = Z[i - n, 1]
    if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
    if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# The distance of the cluster to draw to the left (ua) is uad
# and its count is uan. Likewise, the cluster to draw to the
# right has distance ubd and count ubn.
if ua < n:
uad = 0.0
uan = 1
else:
uad = Z[ua - n, 2]
uan = Z[ua - n, 3]
if ub < n:
ubd = 0.0
ubn = 1
else:
ubd = Z[ub - n, 2]
ubn = Z[ub - n, 3]
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = 'b'
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
# The height of clusters a and b
ah = uad
bh = ubd
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if type(v) != types.StringType:
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
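    Examples
    --------
    Illustrative sketch (added for exposition); the label vectors are made
    up for the example:
    >>> is_isomorphic([1, 1, 2, 2], [2, 2, 1, 1])
    True
    >>> is_isomorphic([1, 1, 2, 2], [1, 2, 1, 2])
    False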
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
        if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
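    Examples
    --------
    Illustrative sketch (added for exposition); the last entry corresponds
    to the root and therefore equals the largest merge distance:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> MD = maxdists(Z)
    >>> MD.shape
    (5,)
    >>> MD[-1] == Z[:, 2].max()
    True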
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy_wrap.get_max_dist_for_each_cluster_wrap(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
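    Examples
    --------
    Illustrative sketch (added for exposition); ``inconsistent`` is the
    function defined earlier in this module:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> R = inconsistent(Z)
    >>> maxinconsts(Z, R).shape
    (5,)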
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
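    Examples
    --------
    Illustrative sketch (added for exposition), using column 3 of the
    inconsistency matrix as the statistic:
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> R = inconsistent(Z)
    >>> maxRstat(Z, R, 3).shape
    (5,)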
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not types.IntType:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
(L, M) = leaders(Z, T):
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
A tuple (L, M) with
L : ndarray
The leader linkage node id's stored as a k-element 1D
array where :math:`k` is the number of flat clusters found
in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
        The corresponding flat cluster ids, stored as a k-element 1D
array where :math:`k` is the number of flat clusters found
in ``T``. This allows the set of flat cluster ids to be
any arbitrary set of :math:`k` integers.
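    Examples
    --------
    A minimal illustrative sketch (added for exposition); ``fcluster`` and
    ``linkage`` are defined earlier in this module and the points are made
    up:
    >>> import numpy as np
    >>> X = [[0., 0.], [0., 1.], [1., 0.], [10., 10.], [10., 11.], [11., 10.]]
    >>> Z = linkage(distance.pdist(X), method='single')
    >>> T = fcluster(Z, t=2.0, criterion='distance')
    >>> L, M = leaders(Z, T)
    >>> len(L) == len(np.unique(T))
    True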
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
        print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
              % (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
                print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
                print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
    # Assert that the first 10 explained variance ratios of the 10- and
    # 20-component fits agree
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
anntzer/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 14 | 4345 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each
    possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def ami_score(U, V):
return metrics.adjusted_mutual_info_score(U, V)
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
ami_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(bottom=-0.05, top=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(bottom=-0.05, top=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
quantopian/zipline | tests/test_security_list.py | 1 | 9722 | from datetime import timedelta
import pandas as pd
from nose_parameterized import parameterized
from zipline.algorithm import TradingAlgorithm
from zipline.errors import TradingControlViolation
from zipline.testing import (
add_security_data,
security_list_copy,
)
from zipline.testing.fixtures import (
WithMakeAlgo,
ZiplineTestCase,
)
from zipline.utils import factory
from zipline.utils.security_list import (
SecurityListSet,
load_from_directory,
)
LEVERAGED_ETFS = load_from_directory('leveraged_etf_list')
class RestrictedAlgoWithCheck(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_asset_restrictions(self.rl.restrict_leveraged_etfs)
self.order_count = 0
self.sid = self.symbol(symbol)
def handle_data(self, data):
if not self.order_count:
if self.sid not in \
self.rl.leveraged_etf_list.\
current_securities(self.get_datetime()):
self.order(self.sid, 100)
self.order_count += 1
class RestrictedAlgoWithoutCheck(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_asset_restrictions(self.rl.restrict_leveraged_etfs)
self.order_count = 0
self.sid = self.symbol(symbol)
def handle_data(self, data):
self.order(self.sid, 100)
self.order_count += 1
class RestrictedAlgoWithoutCheckSetDoNotOrderList(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_do_not_order_list(self.rl.leveraged_etf_list)
self.order_count = 0
self.sid = self.symbol(symbol)
def handle_data(self, data):
self.order(self.sid, 100)
self.order_count += 1
class IterateRLAlgo(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_asset_restrictions(self.rl.restrict_leveraged_etfs)
self.order_count = 0
self.sid = self.symbol(symbol)
self.found = False
def handle_data(self, data):
for stock in self.rl.leveraged_etf_list.\
current_securities(self.get_datetime()):
if stock == self.sid:
self.found = True
class SecurityListTestCase(WithMakeAlgo, ZiplineTestCase):
# XXX: This suite uses way more than it probably needs.
START_DATE = pd.Timestamp('2002-01-03', tz='UTC')
assert START_DATE == sorted(list(LEVERAGED_ETFS.keys()))[0], \
"START_DATE should match start of LEVERAGED_ETF data."
END_DATE = pd.Timestamp('2015-02-17', tz='utc')
extra_knowledge_date = pd.Timestamp('2015-01-27', tz='utc')
trading_day_before_first_kd = pd.Timestamp('2015-01-23', tz='utc')
SIM_PARAMS_END = pd.Timestamp('2002-01-08', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
ASSET_FINDER_EQUITY_SIDS = (1, 2, 3, 4, 5)
ASSET_FINDER_EQUITY_SYMBOLS = ('AAPL', 'GOOG', 'BZQ', 'URTY', 'JFT')
def test_iterate_over_restricted_list(self):
algo = self.make_algo(
algo_class=IterateRLAlgo,
symbol='BZQ',
)
algo.run()
self.assertTrue(algo.found)
def test_security_list(self):
# set the knowledge date to the first day of the
# leveraged etf knowledge date.
def get_datetime():
return self.START_DATE
rl = SecurityListSet(get_datetime, self.asset_finder)
# assert that a sample from the leveraged list are in restricted
should_exist = [
asset.sid for asset in
[self.asset_finder.lookup_symbol(
symbol,
as_of_date=self.extra_knowledge_date)
for symbol in ["BZQ", "URTY", "JFT"]]
]
for sid in should_exist:
self.assertIn(
sid, rl.leveraged_etf_list.current_securities(get_datetime()))
# assert that a sample of allowed stocks are not in restricted
shouldnt_exist = [
asset.sid for asset in
[self.asset_finder.lookup_symbol(
symbol,
as_of_date=self.extra_knowledge_date)
for symbol in ["AAPL", "GOOG"]]
]
for sid in shouldnt_exist:
self.assertNotIn(
sid, rl.leveraged_etf_list.current_securities(get_datetime()))
def test_security_add(self):
def get_datetime():
return pd.Timestamp("2015-01-27", tz='UTC')
with security_list_copy():
add_security_data(['AAPL', 'GOOG'], [])
rl = SecurityListSet(get_datetime, self.asset_finder)
should_exist = [
asset.sid for asset in
[self.asset_finder.lookup_symbol(
symbol,
as_of_date=self.extra_knowledge_date
) for symbol in ["AAPL", "GOOG", "BZQ", "URTY"]]
]
for sid in should_exist:
self.assertIn(
sid,
rl.leveraged_etf_list.current_securities(get_datetime())
)
def test_security_add_delete(self):
with security_list_copy():
def get_datetime():
return pd.Timestamp("2015-01-27", tz='UTC')
rl = SecurityListSet(get_datetime, self.asset_finder)
self.assertNotIn(
"BZQ",
rl.leveraged_etf_list.current_securities(get_datetime())
)
self.assertNotIn(
"URTY",
rl.leveraged_etf_list.current_securities(get_datetime())
)
def test_algo_without_rl_violation_via_check(self):
self.run_algorithm(algo_class=RestrictedAlgoWithCheck, symbol="BZQ")
def test_algo_without_rl_violation(self):
self.run_algorithm(
algo_class=RestrictedAlgoWithoutCheck, symbol="AAPL",
)
@parameterized.expand([
('using_set_do_not_order_list',
RestrictedAlgoWithoutCheckSetDoNotOrderList),
('using_set_restrictions', RestrictedAlgoWithoutCheck),
])
def test_algo_with_rl_violation(self, name, algo_class):
algo = self.make_algo(algo_class=algo_class, symbol='BZQ')
with self.assertRaises(TradingControlViolation) as ctx:
algo.run()
self.check_algo_exception(algo, ctx, 0)
# repeat with a symbol from a different lookup date
algo = self.make_algo(
algo_class=RestrictedAlgoWithoutCheck, symbol='JFT',
)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run()
self.check_algo_exception(algo, ctx, 0)
def test_algo_with_rl_violation_after_knowledge_date(self):
start = self.START_DATE + timedelta(days=7)
end = start + self.trading_calendar.day * 4
algo = self.make_algo(
algo_class=RestrictedAlgoWithoutCheck,
symbol='BZQ',
sim_params=self.make_simparams(
start_session=start,
end_session=end,
)
)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run()
self.check_algo_exception(algo, ctx, 0)
def test_algo_with_rl_violation_cumulative(self):
"""
Add a new restriction, run a test long after both
knowledge dates, make sure stock from original restriction
set is still disallowed.
"""
sim_params = factory.create_simulation_parameters(
start=self.START_DATE + timedelta(days=7),
num_days=4
)
with security_list_copy():
add_security_data(['AAPL'], [])
algo = self.make_algo(
algo_class=RestrictedAlgoWithoutCheck,
symbol='BZQ',
sim_params=sim_params,
)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run()
self.check_algo_exception(algo, ctx, 0)
def test_algo_without_rl_violation_after_delete(self):
sim_params = factory.create_simulation_parameters(
start=self.extra_knowledge_date,
num_days=4,
)
with security_list_copy():
# add a delete statement removing bzq
# write a new delete statement file to disk
add_security_data([], ['BZQ'])
algo = self.make_algo(
algo_class=RestrictedAlgoWithoutCheck,
symbol='BZQ',
sim_params=sim_params,
)
algo.run()
def test_algo_with_rl_violation_after_add(self):
sim_params = factory.create_simulation_parameters(
start=self.trading_day_before_first_kd,
num_days=4,
)
with security_list_copy():
add_security_data(['AAPL'], [])
algo = self.make_algo(
algo_class=RestrictedAlgoWithoutCheck,
symbol='AAPL',
sim_params=sim_params,
)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run()
self.check_algo_exception(algo, ctx, 2)
def check_algo_exception(self, algo, ctx, expected_order_count):
self.assertEqual(algo.order_count, expected_order_count)
exc = ctx.exception
self.assertEqual(TradingControlViolation, type(exc))
exc_msg = str(ctx.exception)
self.assertTrue("RestrictedListOrder" in exc_msg)
| apache-2.0 |
Akshay0724/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (values need to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors]  # 64 random colors
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
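# (aside, not in the original example) an equivalent vectorized form would be
# codebook[labels].reshape(w, h, -1), which avoids the explicit Python loops.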
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
sanjanalab/GUIDES | static/data/pre_processed/precompute_guides_msgpack.py | 2 | 9512 | import msgpack
import json
import pickle
import os.path
from Queue import PriorityQueue
import re
import doench_score
import azimuth.model_comparison
import numpy as np
import pandas as pd
import csv
from intervaltree import IntervalTree
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, score, exon_ranking, ensembl_gene, gene_name, functional_domain=None):
self.start = start
self.seq = seq
self.PAM = PAM
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
self.functional_domain = functional_domain
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
serialization = {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"selected": self.selected,
}
    if self.functional_domain is not None:
serialization["functional_domain"] = self.functional_domain
return serialization
def __cmp__(self, other):
return cmp(self.score, other.score)
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"scoring": "Azimuth",
"quantity": 100,
"functional_domains": False
}
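# Note (annotation): an NGG PAM with a 20-nt protospacer corresponds to SpCas9;
# prime5=True means the protospacer is taken immediately 5' (upstream) of the PAM.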
# azimuth model
azimuth_saved_model_dir = os.path.join(os.path.dirname(azimuth.__file__), 'saved_models')
model_name = 'V3_model_full.pickle'
azimuth_model_file = os.path.join(azimuth_saved_model_dir, model_name)
with open(azimuth_model_file, 'rb') as f:
azimuth_model = pickle.load(f)
# Create interval tree for functional domains
print "constructing interval tuples"
interval_tuples_dict = {}
ucsc_pfam_f = '../functional_domains/ucsc_pfam.txt'
with open(ucsc_pfam_f, 'r') as pfam_csv:
csvreader = csv.reader(pfam_csv, delimiter='\t')
next(csvreader) # skip header
for row in csvreader:
chrom = row[1]
start = row[2]
end = row[3]
name = row[4]
if chrom not in interval_tuples_dict:
interval_tuples_dict[chrom] = []
new_tuple = (int(start), int(end), name)
interval_tuples_dict[chrom].append(new_tuple)
print "constructing interval trees"
interval_trees_dict = {}
for k, v in interval_tuples_dict.iteritems():
interval_trees_dict[k] = IntervalTree.from_tuples(v)
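# Sketch of how these trees are queried later (coordinates are hypothetical):
#   interval_trees_dict['chr1'][12345] -> set of Intervals overlapping position
#   12345, each carrying the Pfam domain name in its .data field.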
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
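# e.g. revcompl("ATCG") returns "CGAT" (complement each base, then reverse)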
print "constructing refGene"
refGeneFilename = '../gtex/refGene.txt'
refGene = pd.read_csv(refGeneFilename, sep="\t")
refGene.columns=['','name','chrom','strand','txStart','txEnd','cdsStart','cdsEnd','exonCount','exonStarts','exonEnds','id','name2','cdsStartStat','cdsEndStat','exonFrames']
refGene["exonStarts"] = refGene.apply(lambda x: x['exonStarts'].split(',')[:-1], axis=1)
refGene["exonEnds"] = refGene.apply(lambda x: x['exonEnds'].split(',')[:-1], axis=1)
refGene["exonFrames"] = refGene.apply(lambda x: x['exonFrames'].split(',')[:-1], axis=1)
def gene_exon_coords(gene, exon):
try:
start = list(refGene.loc[refGene['name'] == gene]['exonStarts'])[0][exon]
end = list(refGene.loc[refGene['name'] == gene]['exonEnds'])[0][exon]
chrom = list(refGene.loc[refGene['name'] == gene]['chrom'])[0]
return {
'start': int(start),
'end': int(end),
'chrom': str(chrom)
}
except IndexError:
return None
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCh37_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read()
else:
return None
with open('genes_list.json') as genes_list_file:
genes_list = json.load(genes_list_file)
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
for gene in genes_list:
exon = 0
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
while seq:
    # Check if we haven't done this in a previous run of the program
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
folder = '../GRCh37_guides_msgpack_' + params["scoring"] + '/'
if params['functional_domains']:
folder = '../GRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
if os.path.isfile(output_path):
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
continue
q = PriorityQueue()
domain_q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq, domain):
if 'N' in seq:
return
PAM_start = m.start()
score = 0
if params["scoring"] == "Doench":
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
elif params["scoring"] == "Azimuth":
# Azimuth requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = azimuth.model_comparison.predict(np.array([mer30]), aa_cut=None, percent_peptide=None, model=azimuth_model, model_file=azimuth_model_file)[0]
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, score, exon, gene["ensembl_id"], gene["name"], domain)
if domain:
domain_q.put(potential_gRNA)
# If there's enough room, add it, no question.
elif q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if potential_gRNA.score > lowest_gRNA.score:
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": # spCas9
cut_site = coords['start'] + m.start() - 3
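        # (annotation) the -3 offset reflects SpCas9 cutting ~3 bp 5' of the PAM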
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq, domain)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
# Functional domains currently only supported for Cas9.
# This needs to be modified for other genome editing proteins.
domain = None
if params["PAM"] == "NGG": #spCas9
cut_site = coords['end'] - m.start() + 3
chrom = 'chr' + coords['chrom']
if chrom in interval_trees_dict:
domain_matches = list(interval_trees_dict[chrom][cut_site])
if len(domain_matches) > 0:
domain = domain_matches[0].data
process_guide(m, True, params["quantity"], seq_rc, domain)
# Pop gRNAs into our 'permanent' storage
count = 0
gRNAs = []
while not q.empty() and count < params["quantity"]:
gRNA = q.get()
gRNAs.append(gRNA.serialize_for_display())
count = count + 1
while not domain_q.empty() and count < params["quantity"]:
gRNA = domain_q.get()
gRNAs.append(gRNA.serialize_for_display())
count = count + 1
domain_count = count
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
if domain_count > 0:
print "for {0} we had {1} domain and {2} ordinary guides.".format(outfile_name, domain_count, count - domain_count)
folder = '../GRCh37_guides_msgpack_' + params['scoring'] + '/'
if params['functional_domains']:
folder = '../GRCh37_guides_msgpack_' + params['scoring'] + '_domains/'
output_path = os.path.join(folder, outfile_name)
with open(output_path, 'w') as outfile:
# Reverse gRNAs list.
# Want highest on-target first.
msgpack.dump(gRNAs[::-1], outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
coords = gene_exon_coords(gene["ensembl_id"], exon)
| bsd-3-clause |
mr3bn/DAT210x | Module2/module2Notes.py | 1 | 2684 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 04 14:30:05 2017
@author: Mark
"""
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('sqlite:///:memory:')
# some import examples
#df = pd.read_sql_table('my_table', engine, columns = ['ColA', 'ColB'])
#df = pd.read_excel('my_dataset.xlsx', 'Sheet1', na_values=['NA'])
#df = pd.read_json('my_dataset.json', orient='columns')
#df = pd.read_csv('my_dataset.csv')
#
## writing is easy, too
#df.to_sql('table', engine)
#df.to_excel('dataset.xlsx')
#df.to_json('dataset.json')
#df.to_csv('dataset.csv')
# none of those will work, so let's do one that will:
df = pd.read_csv('Datasets/direct_marketing.csv')
# basic summary stuff included in pandas...
print df.head(5)
print df.tail(5)
print df.describe()
print df.columns
#################### COLUMN INDEXING ####################
# one way to get a single column out of data frame is to access it by name...
# this syntax will return a SERIES object of one column, size=nrow:
rec = df.recency
rec = df['recency']
# doubling up on the brackets returns a DATA FRAME of the same column, size=nrowx1
# intuitively, the interior set of brackets is a list in itself,
# so this application can actually handle more than one column:
rec = df[['recency']]
rec = df[['recency', 'history']]
# the df.loc method provides a marginally faster way to access a column by name...
# same series of size=nrow, just using a different method:
rec = df.loc[:, 'recency']
# and this application will again return a data frame (nrowx1)
rec = df.loc[:, ['recency']]
# same story, can slice to > 1 column:
rec = df.loc[:, ['recency', 'history']]
# df.loc also works with boolean masks, but won't bother with that right now
# the df.iloc method uses numbered indexes instead of names
rec = df.iloc[:, 0]
rec = df.iloc[:, [0]]
# IMPORTANT: for the slice implementation of .iloc, the end index is NOT
# inclusive (label-based .loc slices, by contrast, include both endpoints).
rec = df.iloc[:, 0:1] # returns the same as df.iloc[:, [0]]
# df.ix is sort of a hybrid of .loc and .iloc
rec = df.ix[:, 0]
rec = df.ix[:, 'recency']
rec = df.ix[:, 0:1]
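# note (added): .ix was deprecated in pandas 0.20 and later removed entirely,
# so .loc / .iloc are the forms to prefer in current pandas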
#################### ROW INDEXING ####################
# one easy way to subset rows is with a boolean operation...
df.recency < 7 # returns a series of booleans, which we can use as a mask:
df[df.recency < 7]
# this methodology can work with multiple boolean tests:
df[(df.recency < 7) & (df.newbie == 0)]
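# a related pattern (added here as a sketch): boolean row masks combine with
# column selection through .loc, e.g.
# df.loc[df.recency < 7, ['recency', 'history']]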
# it's also possible to write into a slice of a dataframe:
# df[df.recency < 7] = -100 will set ALL columns to -100 in the rows
# where recency is < 7. a better implementation is to do this for one
# column at a time, to account for data frame nonhomogeneity | mit |
ericmjl/influenza-reassortment-analysis | circos.py | 1 | 2142 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
from matplotlib.path import Path
class CircosPlot(object):
def __init__(self, nodes, edges, radius, nodecolor='black', figsize=(8,8), ax=None, fig=None):
        self.nodes = nodes  # list of nodes; their order sets the angular layout
        self.edges = edges  # iterable of (node1, node2) pairs
self.nodecolor = nodecolor
self.radius = radius
        if fig is None:
self.fig = plt.figure(figsize=figsize)
else:
self.fig = fig
        if ax is None:
self.ax = self.fig.add_subplot(111)
else:
self.ax = ax
self.node_radius = self.radius*0.05
self.ax.set_xlim(-radius*1.05, radius*1.05)
self.ax.set_ylim(-radius*1.05, radius*1.05)
self.ax.xaxis.set_visible(False)
self.ax.yaxis.set_visible(False)
for k in self.ax.spines.keys():
self.ax.spines[k].set_visible(False)
def draw(self):
self.add_nodes()
self.add_edges()
def add_nodes(self):
r = self.radius
node_r = self.node_radius
for node in self.nodes:
theta = self.node_theta(node)
x, y = get_cartesian(r, theta)
node_patch = patches.Ellipse((x,y), node_r, node_r, facecolor=self.nodecolor, lw=0)
self.ax.add_patch(node_patch)
def draw_edge(self, node1, node2):
start_theta = self.node_theta(node1)
end_theta = self.node_theta(node2)
middle_theta = (start_theta + end_theta)/2.0
delta_theta = abs(end_theta - start_theta)
middle_r = self.radius * (1 - delta_theta / np.pi)
# verts = [get_cartesian(self.radius, start_theta), get_cartesian(middle_theta, middle_r), get_cartesian(self.radius,end_theta)]
verts = [get_cartesian(self.radius, start_theta), (0,0), get_cartesian(self.radius,end_theta)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
path = Path(verts, codes)
patch = patches.PathPatch(path, lw=1, facecolor='none')
self.ax.add_patch(patch)
def node_theta(self, node):
''' Maps node to Angle '''
i = self.nodes.index(node)
theta = i*2*np.pi/len(self.nodes)
return theta
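    # e.g. with 4 nodes, node_theta assigns 0, pi/2, pi and 3*pi/2 (evenly spaced)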
def add_edges(self):
for start, end in self.edges:
self.draw_edge(start, end)
def get_cartesian(r, theta):
x = r*np.sin(theta)
y = r*np.cos(theta)
return x, y | mit |
vibhorag/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
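# Minimal usage sketch (illustrative, not part of the original module; X stands
# for any (n_samples, n_features) feature array):
#   from sklearn.cluster import KMeans
#   labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#   score = silhouette_score(X, labels)   # a float in [-1, 1]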
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
shikhardb/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
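# (added note) consensus_score is 1.0 when the found biclusters match the
# generated ones exactly, so values close to 1 indicate a good recovery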
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 19 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
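# (added note) by construction, columns 0-1 of X share the latent l1 and
# columns 2-3 share l2, so the printed correlation matrices show noticeably
# higher correlations within those pairs than across them; the same holds for Y.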
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
jbargu/dotfiles | ipython/ipython_config.py | 2 | 24350 | # Configuration file for ipython.
# ------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
# ------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = ["%autoreload 2"]
## A list of dotted module names of IPython extensions to load.
c.InteractiveShellApp.extensions = ["autoreload"]
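## (illustrative example, not part of this configuration) further startup lines
## could be added to the same list if desired, e.g.:
# c.InteractiveShellApp.exec_lines = ["%autoreload 2", "import numpy as np"]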
## dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
## A file to be run
# c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('asyncio', 'glut', 'gtk',
# 'gtk2', 'gtk3', 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2',
# 'qt4').
# c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
## If True, IPython will not add the current working directory to sys.path. When
# False, the current working directory is added to sys.path, allowing imports of
# modules defined in the current directory.
# c.InteractiveShellApp.ignore_cwd = False
## Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# ------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# c.Application.log_level = 30
# ------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
# ------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
# c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.BaseIPythonApplication.ipython_dir = ''
## Whether to overwrite existing config files when copying
# c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
# c.BaseIPythonApplication.profile = 'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.BaseIPythonApplication.verbose_crash = False
# ------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
# ------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
## Class to use to instantiate the TerminalInteractiveShell object. Useful for
# custom Frontends
# c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
## Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# ------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
# ------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which
# nodes should be run interactively (displaying output from expressions).
# c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.InteractiveShell.ast_transformers = []
## Automatically run await statement in the top level repl.
# c.InteractiveShell.autoawait = True
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
# c.InteractiveShell.banner1 = "Python 3.7.3 (default, Apr 3 2019, 19:16:38) \nType 'copyright', 'credits' or 'license' for more information\nIPython 7.16.1 -- An enhanced Interactive Python. Type '?' for help.\n"
## The part of the banner to be printed after the profile
# c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 3 (if you provide a value
# less than 3, it is reset to 0 and a warning is issued). This limit is defined
# because otherwise you'll spend more time re-flushing a too small cache than
# working
# c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
# c.InteractiveShell.colors = 'Neutral'
##
# c.InteractiveShell.debug = False
## Don't call post-execute functions that have failed in the past.
# c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
# c.InteractiveShell.enable_html_pager = False
## Total length of command history
# c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
# c.InteractiveShell.history_load_length = 1000
##
# c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.InteractiveShell.logappend = ''
## The name of the logfile to use.
# c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.InteractiveShell.logstart = False
## Select the loop runner that will be used to execute top-level asynchronous
# code
# c.InteractiveShell.loop_runner = 'IPython.core.interactiveshell._asyncio_runner'
##
# c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
# c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
# c.InteractiveShell.prompts_pad_left = True
##
# c.InteractiveShell.quiet = False
##
# c.InteractiveShell.separate_in = '\n'
##
# c.InteractiveShell.separate_out = ''
##
# c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
# c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
# c.InteractiveShell.sphinxify_docstring = False
##
# c.InteractiveShell.wildcards_case_sensitive = True
## Switch modes for the IPython exception handlers.
# c.InteractiveShell.xmode = 'Context'
# ------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
# ------------------------------------------------------------------------------
## Autoformatter to reformat Terminal code. Can be `'black'` or `None`
# c.TerminalInteractiveShell.autoformatter = None
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
# c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
# c.TerminalInteractiveShell.editing_mode = 'emacs'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vim'
## Allows to enable/disable the prompt toolkit history search
# c.TerminalInteractiveShell.enable_history_search = True
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
# c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Provide an alternative handler to be called when the user presses Return. This
# is an advanced option intended for debugging, which may be changed or removed
# in later releases.
# c.TerminalInteractiveShell.handle_return = None
## Highlight matching brackets.
# c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax highlighting. To see
# available styles, run `pygmentize -L styles`.
# c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
# c.TerminalInteractiveShell.highlighting_style_overrides = {}
##
# c.TerminalInteractiveShell.mime_renderers = {}
## Enable mouse support in the prompt (Note: prevents selecting text with the
# mouse)
# c.TerminalInteractiveShell.mouse_support = False
## Display the current vi mode (when using vi editing mode).
# c.TerminalInteractiveShell.prompt_includes_vi_mode = True
## Class used to generate Prompt token for prompt_toolkit
# c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion and prompt colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
# c.TerminalInteractiveShell.simple_prompt = False
## Number of lines at the bottom of the screen to reserve for the tab completion
# menu, search history, ...etc; the height of these menus will be at most this
# value. Increase it if you prefer long and skinny menus, decrease for short and
# wide.
# c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
# c.TerminalInteractiveShell.term_title = True
## Customize the terminal title format. This is a python format string.
# Available substitutions are: {cwd}.
# c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}'
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
# c.TerminalInteractiveShell.true_color = False
# ------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
# ------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colon at both
# end but not the back ticks), to avoid creating an history file.
# c.HistoryAccessor.hist_file = ''
# ------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
# ------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# ------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
# ------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
# ------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
# ------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
##
# c.BaseFormatter.deferred_printers = {}
##
# c.BaseFormatter.enabled = True
##
# c.BaseFormatter.singleton_printers = {}
##
# c.BaseFormatter.type_printers = {}
# ------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
# ------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
# c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
##
# c.PlainTextFormatter.max_width = 79
##
# c.PlainTextFormatter.newline = '\n'
##
# c.PlainTextFormatter.pprint = True
##
# c.PlainTextFormatter.verbose = False
# ------------------------------------------------------------------------------
# Completer(Configurable) configuration
# ------------------------------------------------------------------------------
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
# c.Completer.backslash_combining_completions = True
## Enable debug for the Completer. Mostly print extra information for
# experimental jedi integration.
# c.Completer.debug = False
## Activate greedy completion. PENDING DEPRECATION: this is now mostly taken
# care of by Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.Completer.greedy = False
## Experimental: restrict time (in milliseconds) during which Jedi can compute
# types. Set to 0 to stop computing types. Non-zero value lower than 100ms may
# hurt performance by preventing jedi to build its cache.
# c.Completer.jedi_compute_type_timeout = 400
## Experimental: Use Jedi to generate autocompletions. Defaults to True if jedi
# is installed.
# c.Completer.use_jedi = True
# ------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
# ------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# ------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
# ------------------------------------------------------------------------------
# LoggingMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Magics related to all logging machinery.
## Suppress output of log state when logging is enabled
# c.LoggingMagics.quiet = False
# ------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
# ------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| mit |
valandil/msc_thesis | figs/backmatter/sParameters.py | 2 | 2230 | # ------------------- Information --------------------- #
# Author: Joey Dumont <joey.dumont@gmail.com> #
# Date created: October 7th, 2013 #
# Date mod. October 7th, 2013 #
# Description: We plot the scattering parameters of #
# RF-21 fibre design. Experimental and #
# theoretical. #
# ----------------------------------------------------- #
# --------------- Modules Importation ----------------- #
from pylab import *
from matplotlib.ticker import AutoMinorLocator
# Setting the rc parameters.
rcParams['text.usetex'] = True
rcParams['text.latex.preamble'] = r'\usepackage[charter]{mathdesign}'
rcParams['font.size'] = 10
rcParams['legend.numpoints'] = 3
# ----------------- Data Importation ------------------ #
S11 = loadtxt("RF21-S11.dat")
Ssim = loadtxt("RF21-sParameters-Ag4.csv",skiprows=1,delimiter=",")
S22 = loadtxt("RF21-S22.dat")
S12 = loadtxt("RF21-S21.txt", skiprows=2)
# ---------------- Data Manipulation ------------------ #
# -- We plot the data as a function of normalized values
c=3.0e8/10e9
sz=32.0e-3
dz=28.0e-3
P=sz+dz
dl=27.0e-3
dr=55.0e-3
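# Note (added): sz, dz, dl and dr appear to be fibre geometry dimensions in
# metres (assumption); P = sz + dz is the period used by the top axis below to
# express frequency as the normalized wavelength lambda_f / P.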
# -- We use two axes to plot data.
freqmin = min(S11[0,0],Ssim[0,0],S12[0,0],S22[0,0])
freqmax = max(S11[-1,0],Ssim[-1,0],S12[-1,0],S22[-1,0])
Smin = min(min(S11[:,1]),min(Ssim[:,1]),min(S12[:,1]),min(S22[:,1]))
Smax = max(max(S11[:,1]),max(Ssim[:,1]),max(S12[:,1]),max(S22[:,1]))
# ------------------ Plotting data -------------------- #
fig1 = figure(figsize=(7,3))
ax1 = fig1.add_subplot(111)
ax1.plot(Ssim[:,0],Ssim[:,1],"b--")
ax1.plot(Ssim[:,0],Ssim[:,2],"k--")
minorLocator = AutoMinorLocator()
ax1.xaxis.set_minor_locator(minorLocator)
ax1.set_xlabel("Frequency (GHz)")
ax1.set_ylabel("$S$-parameters (dB)")
xlim((0.1,5))
fig1.savefig("sParametersRF21sim.pdf", bbox_inches='tight')
ax1.plot(S11[:,0],S11[:,1], "b-", label="$S_{11}$")
ax1.plot(S22[:,0],S22[:,1], "r-", label="$S_{22}$")
ax1.plot(S12[:,0],S12[:,1], "k-", label="$S_{12}$")
ax1.legend(loc=0)
ax2 = ax1.twiny()
ax2.set_xlabel(r"$\lambda_f/P$")
ax2.set_xticks(ax1.get_xticks())
def tick_function(X):
V = c/(P*X)
return ["%.3f" % z for z in V]
ax2.set_xticklabels(tick_function(ax1.get_xticks()))
#show()
fig1.savefig("sParametersRF21.pdf", bbox_inches="tight")
| gpl-3.0 |
charlesll/RamPy | legacy_code/HT_dec.py | 1 | 5650 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 21 16:26:34 2014
Modified in Nov. 2014 for spectral differentiation and testing how
hydrogen bonding affects DH fractionation
Input => one line = paths of the fluid and melt spectra with their 1000 ln(alpha) values and the initial D/H values
@author: charleslelosq
"""
import numpy as np
import scipy
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
from Tkinter import *
import tkMessageBox
from tkFileDialog import askopenfilename
from tkFileDialog import asksaveasfile
# Collecting the list of spectra
tkMessageBox.showinfo(
"Open ",
"Please open the list of spectra")
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
samplename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
# we import the information in an array, skipping the first line
dataliste = np.genfromtxt(samplename,dtype = 'string', delimiter = '\t', skip_header=0,skip_footer=0)
pathfluid = (dataliste[:,0])
pathmelt = (dataliste[:,1])
alphas = np.genfromtxt(dataliste[:,2])
esealphas = np.genfromtxt(dataliste[:,3])
DHinit = dataliste[:,4]
xOH = np.arange(2100,3850,0.2)
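# Common wavenumber grid (cm^-1, assumed) onto which both spectra are
# interpolated below so they can be differenced point by point.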
ratio = np.zeros((len(alphas),9)) # columns: total |diff| area of the OH stretch, its error, mean OH/OD stretching frequencies (melt & fluid), pseudo-ZPEs (melt & fluid) and their difference
for i in range(len(alphas)): # We loop over the entries in dataliste
rawspfluid = np.genfromtxt(pathfluid[i]) # full path is expected
rawspmelt = np.genfromtxt(pathmelt[i]) # full path is expected
    # resample both spectra onto the same X axis
rawfluid = np.zeros((len(xOH),3))
rawfluid[:,1] = np.interp(xOH,rawspfluid[:,0],rawspfluid[:,1])
rawfluid[:,0] = xOH
rawfluid[:,2] = sqrt(abs(rawfluid[:,1]))/abs(rawfluid[:,1]) #relative error
rawmelt = np.zeros((len(xOH),3))
rawmelt[:,1] = np.interp(xOH,rawspmelt[:,0],rawspmelt[:,1])
rawmelt[:,0] = xOH
rawmelt[:,2] = sqrt(abs(rawmelt[:,1]))/abs(rawmelt[:,1]) #relative error
# Boundaries for the OD and OH stretch peaks
lbOH = 2810
hbOH = 3800
lbOD = 2100
hbOD = lbOH
ODfluid = rawfluid[np.where((rawfluid[:,0]>lbOD) & (rawfluid[:,0] < hbOD))]
ODmelt = rawmelt[np.where((rawmelt[:,0]>lbOD) & (rawmelt[:,0] < hbOD))]
OHfluid = rawfluid[np.where((rawfluid[:,0]>lbOH) & (rawfluid[:,0] < hbOH))]
OHmelt = rawmelt[np.where((rawmelt[:,0]>lbOH) & (rawmelt[:,0] < hbOH))]
# Normalization to total area
aOHfluid = np.trapz(OHfluid[:,1],OHfluid[:,0])
eseaOHfluid = sqrt(aOHfluid)
aOHmelt = np.trapz(OHmelt[:,1],OHmelt[:,0])
eseaOHmelt = sqrt(aOHmelt)
OHfluid[:,1] = OHfluid[:,1]/aOHfluid
OHmelt[:,1] = OHmelt[:,1]/aOHmelt
aODfluid = np.trapz(ODfluid[:,1],ODfluid[:,0])
eseaODfluid = sqrt(aODfluid)
aODmelt = np.trapz(ODmelt[:,1],ODmelt[:,0])
eseaODmelt = sqrt(aODmelt)
ODfluid[:,1] = ODfluid[:,1]/aODfluid
ODmelt[:,1] = ODmelt[:,1]/aODmelt
diffOH = np.zeros(shape(OHfluid))
diffOH[:,0] = OHmelt[:,0]
diffOH[:,1] = OHmelt[:,1] - OHfluid[:,1]
diffOH[:,2] = np.sqrt(OHmelt[:,2]**2 + OHfluid[:,2]**2)
diffOD = np.zeros(shape(ODfluid))
diffOD[:,0] = ODmelt[:,0]
diffOD[:,1] = ODmelt[:,1] - ODfluid[:,1]
diffOD[:,2] = np.sqrt(ODmelt[:,2]**2+ODfluid[:,2]**2)
# Here we only quantify the total difference between the OH stretch of the melt and the fluid
ratio[i,0] = np.trapz(abs(diffOH[:,1]),diffOH[:,0])
ratio[i,1] = np.trapz(abs(diffOH[:,2]),diffOH[:,0])
    # And here we quantify the difference between the mean O-D and O-H stretching frequencies in the melt and the fluid
# Mean frequencies of vibration
OHFreqMelt = np.sum(OHmelt[:,0]*(OHmelt[:,1]/np.sum(OHmelt[:,1])))
OHFreqFluid = np.sum(OHfluid[:,0]*(OHfluid[:,1]/np.sum(OHfluid[:,1])))
ODFreqMelt = np.sum(ODmelt[:,0]*(ODmelt[:,1]/np.sum(ODmelt[:,1])))
ODFreqFluid = np.sum(ODfluid[:,0]*(ODfluid[:,1]/np.sum(ODfluid[:,1])))
pseudoDZPEmelt = OHFreqMelt - ODFreqMelt
pseudoDZPEfluid = OHFreqFluid - ODFreqFluid
diffPseudoDPE = pseudoDZPEmelt - pseudoDZPEfluid
figure(i,figsize=(12,6))
gs = gridspec.GridSpec(1, 2)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.plot(ODfluid[:,0],ODfluid[:,1],'b-')
ax1.plot(ODmelt[:,0],ODmelt[:,1],'r-')
ax1.plot(diffOD[:,0],diffOD[:,1],'g-')
ax1.plot(np.array([ODFreqMelt,ODFreqMelt]),np.array([0,1]),'r-')
ax1.plot(np.array([ODFreqFluid,ODFreqFluid]),np.array([0,1]),'b-')
ax1.set_ylim(((1.10*np.min(diffOD[:,1])),(1.10*np.max(ODfluid[:,1]))))
ax2.plot(OHfluid[:,0],OHfluid[:,1],'b-')
ax2.plot(OHmelt[:,0],OHmelt[:,1],'r-')
ax2.plot(diffOH[:,0],diffOH[:,1],'g-')
ax2.plot(np.array([OHFreqMelt,OHFreqMelt]),np.array([0,1]),'r-')
ax2.plot(np.array([OHFreqFluid,OHFreqFluid]),np.array([0,1]),'b-')
ax2.set_ylim((1.10*min(diffOH[:,1]),1.10*max(OHfluid[:,1])))
ratio[i,2] = OHFreqMelt
ratio[i,3] = OHFreqFluid
ratio[i,4] = ODFreqMelt
ratio[i,5] = ODFreqFluid
ratio[i,6] = pseudoDZPEmelt
ratio[i,7] = pseudoDZPEfluid
ratio[i,8] = diffPseudoDPE
figure()
plot(alphas,ratio[:,4],'ro')
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
savefilename = asksaveasfile() # show a "Save As" dialog box and return a file object for the chosen location
out = vstack((alphas,esealphas,ratio[:,0],ratio[:,1],ratio[:,2],ratio[:,3],ratio[:,4],ratio[:,5],ratio[:,6],ratio[:,7],ratio[:,8])).T # Output matrix with 11 columns: alphas, their errors, diff-spectrum area and its error, mean OH/OD frequencies, pseudo-ZPEs and their difference
np.savetxt(savefilename,out) | gpl-2.0 |
lodemo/CATANA | data/small_sample_cluster/comparisons/compare.py | 1 | 4381 | # -*- coding: utf-8 -*-
# detects collaborations of actors from features in db
# read features from db
# method 1:
# generate pairs of features (all pairs of features that are not from the same video)
# measure the distance between the feature lists of a pair (mean, max, etc., as in the YTF evaluation)
# use a threshold (same as in the YTF evaluation?) to decide whether the features belong to the same person
# if they do, add an edge between the feature ids / video ids
# TODO: how to detect whether a match is a collaboration or the channel owner
# (a hedged sketch of method 1 is added below, after the imports)
# method 2:
# cluster the features (all features from every list?)
# add an edge between all videos / feature ids in the same cluster
# which clustering? dbscan, hdbscan?
# method x -> use clustering with average features, etc.?
from __future__ import unicode_literals
import os
import time
import numpy as np
import pandas as pa
import cPickle as cp
import json
import math
import itertools
import string
import networkx as nx
fileDir = os.path.dirname(os.path.realpath(__file__))
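# --- Added sketch: a minimal, hedged illustration of "method 1" from the header
# --- comments. The function name, the Euclidean distance and the 0.6 threshold
# --- are assumptions rather than values taken from this project; the function
# --- is defined for illustration only and is never called.
def _sketch_method1_same_person(features_a, features_b, threshold=0.6):
    """Return True if the face features of two videos look like the same person."""
    import numpy as np  # local import keeps the sketch self-contained
    dists = [np.linalg.norm(np.asarray(fa) - np.asarray(fb))
             for fa in features_a for fb in features_b]
    return float(np.mean(dists)) < threshold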
def create_collab_graph(ftcollabs):
cluster = {}
label = ftcollabs['label'].unique()
for l in label:
ftl = ftcollabs[ftcollabs.label == l]
groups = ftl.groupby(['channel'])
vcounts = groups.videoID.nunique()
cluster[l] = [(cid, nof) for cid, nof in vcounts.sort_values(ascending=False).iteritems()]
# do graph creation somewhere else, so cluster labels are not lists but single ids generated based on cluster lists?
    G = nx.DiGraph() # directed graph
for l, cls in cluster.iteritems():
mainc = cls[0][0]
#print mainc
if G.has_node(mainc):
if 'cluster' in G.node[mainc]:
G.node[mainc]['cluster'].append(str(l))
else:
G.node[mainc]['cluster'] = [str(l)]
with db._session_scope(False) as session:
G.node[mainc]['network'] = session.query(Channel.network).filter(Channel.id == mainc).first()[0]
else:
with db._session_scope(False) as session:
network = session.query(Channel.network).filter(Channel.id == mainc).first()[0]
G.add_node(mainc, cluster=[str(l)], network=network) # todo make clusters a list, extend list if already there, so multiple cluster could have "main channel"
for (c, n) in cls[1:]:
G.add_edge(mainc, c, weight=int(n), cluster=str(l))
print G.nodes()
print G.edges()
#nx.write_gexf(G, "collab_detections_graph.gexf")
#nx.write_gml(G, "collab_detections_graph.gml")
# save features with labels as pickle ?!
return G
def extend_df(ft, label, proba, pers):
db = YTDatabase()
ch = []
persistents = []
with db._session_scope(False) as session:
for i, l in enumerate(label):
vid = ft['videoID'].iloc[i]
cid = session.query(Video.channelID).filter(Video.id==vid).first()[0]
ch.append(cid)
if int(l) != -1:
persistents.append(pers[int(l)])
else:
persistents.append(0)
ft['channel'] = np.array(ch)
ft['label'] = np.array(label)
ft['proba'] = np.array(proba)
ft['pers'] = np.array(persistents)
fto = ft.iloc[:,[0,1,3,4,5,6,7]]
ftost = fto.sort_values(['channel', 'videoID'])
return ftost
G_list = {}
G0 = nx.read_gml("sample_cluster_collabs.gml")
G1 = nx.read_gml("hdb_collab_cluster_sample.gml")
G_list['normal'] = G1
G3 = nx.read_gml("hdb_collab_cluster_sample_first.gml")
G_list['first'] = G3
G4 = nx.read_gml("hdb_collab_cluster_sample_mean_mean.gml")
G_list['mean'] = G4
G5 = nx.read_gml("hdb_collab_cluster_sample_median.gml")
G_list['median'] = G5
G6 = nx.read_gml("hdb_collab_cluster_sample_firstth.gml")
G_list['firstth'] = G6
G7 = nx.read_gml("hdb_collab_cluster_sample_firstth20.gml")
G_list['firstth20'] = G7
print 'ground truth sample cluster:'
print 'nodes:', len(G0.nodes())
print 'edges:', len(G0.edges())
print '\n\n'
for node in G1.nodes():
for key, value in G_list.iteritems():
if not value.has_node(node):
value.add_node(node)
for key, value in G_list.iteritems():
print key, 'sample cluster'
print 'nodes:', len(value.nodes())
print 'edges:', len(value.edges())
print '\nDIFF normal test:'
print 'missing edges:', len(nx.difference(G1, value).edges())
print 'added edges:', len(nx.difference(value, G1).edges())
print '\n\n'
| mit |
Titan-C/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 18 | 48928 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors.base import VALID_METRICS_SPARSE, VALID_METRICS
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],
[0.98, 0.98], [2.01, 2.01]])
y = np.array([1, 2, 1, 1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([-1, 1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since the user is expected
        # to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
rnn = neighbors. RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = {}
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results[algorithm] = neigh.kneighbors(test, return_distance=True)
assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])
assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])
if 'kd_tree' in results:
assert_array_almost_equal(results['brute'][0],
results['kd_tree'][0])
assert_array_almost_equal(results['brute'][1],
results['kd_tree'][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_valid_brute_metric_for_auto_algorithm():
X = rng.rand(12, 12)
Xcsr = csr_matrix(X)
# check that there is a metric that is valid for brute
# but not ball_tree (so we actually test something)
assert_in("cosine", VALID_METRICS['brute'])
assert_false("cosine" in VALID_METRICS['ball_tree'])
    # Metrics which don't require any additional parameter
require_params = ['mahalanobis', 'wminkowski', 'seuclidean']
for metric in VALID_METRICS['brute']:
if metric != 'precomputed' and metric not in require_params:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric).fit(X)
nn.kneighbors(X)
elif metric == 'precomputed':
X_precomputed = rng.random_sample((10, 4))
Y_precomputed = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X_precomputed, metric='euclidean')
DYX = metrics.pairwise_distances(Y_precomputed, X_precomputed,
metric='euclidean')
nb_p = neighbors.NearestNeighbors(n_neighbors=3)
nb_p.fit(DXX)
nb_p.kneighbors(DYX)
for metric in VALID_METRICS_SPARSE['brute']:
if metric != 'precomputed' and metric not in require_params:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric).fit(Xcsr)
nn.kneighbors(Xcsr)
# Metric with parameter
VI = np.dot(X, X.T)
list_metrics = [('seuclidean', dict(V=rng.rand(12))),
('wminkowski', dict(w=rng.rand(12))),
('mahalanobis', dict(VI=VI))]
for metric, params in list_metrics:
nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric=metric,
metric_params=params).fit(X)
nn.kneighbors(X)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
    # Raise error when wrong parameters are supplied.
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# Non-regression test for #4523
# 'brute': uses scipy.spatial.distance through pairwise_distances
# 'ball_tree': uses sklearn.neighbors.dist_metrics
rng = np.random.RandomState(0)
X = rng.uniform(size=(6, 5))
NN = neighbors.NearestNeighbors
nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
| bsd-3-clause |
rtlee9/food-GAN | src/viz.py | 1 | 1386 | from os import path
import argparse
import pickle
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import config
parser = argparse.ArgumentParser()
parser.add_argument('--naverage', type=int, help='number of periods over which to average', default=20)
args = parser.parse_args()
columns = [
'Discriminator loss',
'Prob real',
'Prob fake (before G update)',
'Prob fake (after G update)',
]
# read pickled loss history
with open(path.join(config.path_outputs, 'loss_history.pkl'), 'rb') as f:
loss_history = pickle.load(f)
# smooth loss curves via moving average
losses = np.array(loss_history)
smoothed = np.vstack(
[np.convolve(series, np.ones(args.naverage) / args.naverage)
for series in losses.T]).T[args.naverage - 1:-args.naverage + 1, :]
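# Note (added): np.convolve with its default 'full' mode pads each series at
# both ends, so the slice above drops the (naverage - 1) partially averaged
# points on each side; the smoothed curves are therefore naverage - 1 samples
# shorter than the raw loss series.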
# create x axis
epochs = 25 # TODO: read this dynamically
truncated = args.naverage / 2 / epochs
x = np.linspace(1 + truncated, epochs - truncated, smoothed.shape[0])
fig, ax = plt.subplots()
for i, series in enumerate(smoothed.T):
ax.plot(x, series, label=columns[i])
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.9])
legend = ax.legend(loc='lower center', ncol=2, bbox_to_anchor=[0.5, -0.32], shadow=True)
ax.set_xlabel('Iteration')
fig.savefig(path.join(config.path_base, 'WGAN_loss_history.png'))
| mit |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 6 | 5273 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
coreyabshire/ptolemy | python/triangulate_measure.py | 2 | 2147 | # triangulate_measure.py: Predict modern coordinates for places
# described by Ptolemy using a flocking (triangulation) model against
# the places that have been suggested by other means.
import os
import logging
import pandas as pd
from sklearn.cross_validation import LeaveOneOut
from geopy.distance import vincenty
import sgdb
import geocode
from flocking import FlockingModel
PTOL_HOME = os.environ['PTOL_HOME']
logging.basicConfig(level='DEBUG')
KEY_PLACE_FIELDNAMES = [
'ptol_id',
'ptol_name',
'ptol_lat',
'ptol_lon',
'modern_name']
X_NAMES = ['ptol_lat', 'ptol_lon']
Y_NAMES = ['modern_lat', 'modern_lon']
P_NAMES = ['pred_lat', 'pred_lon']
# book 7 contains India
# chapter 1 is within the Ganges
TARGET_BOOK = '7.01'
places = sgdb.read_places().drop_duplicates('ptol_id')
places.reindex(columns=['ptol_id'])
places = places.loc[pd.notnull(places.ptol_lat), :]
places = places.loc[:, KEY_PLACE_FIELDNAMES]
places = places.loc[places.ptol_id.str.startswith(TARGET_BOOK), :]
places = pd.merge(places, geocode.read_geocodes(), how='left')
known = places.loc[pd.notnull(places.modern_lat), :]
known.is_copy = False
loo = LeaveOneOut(len(known))
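# Leave-one-out evaluation (descriptive note, added): each known place is held
# out in turn and predicted from all the other known places, so every row of
# `known` receives an out-of-sample prediction below.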
for train, test in loo:
trainx = known.iloc[train, :].loc[:, X_NAMES]
trainy = known.iloc[train, :].loc[:, Y_NAMES]
testx = known.iloc[test, :].loc[:, X_NAMES]
model = FlockingModel()
model.fit(trainx, trainy)
testy = model.predict(testx)
known.loc[known.iloc[test,:].index, 'pred_lat'] = testy[0][0]
known.loc[known.iloc[test,:].index, 'pred_lon'] = testy[0][1]
for i, p in known.iterrows():
lat_err = p.modern_lat - p.pred_lat
lon_err = p.modern_lon - p.pred_lon
sq_err = lat_err ** 2 + lon_err ** 2
modern_coords = (p.modern_lat, p.modern_lon)
pred_coords = (p.pred_lat, p.pred_lon)
dist_err = vincenty(modern_coords, pred_coords).miles
known.loc[i, 'lat_err'] = lat_err
known.loc[i, 'lon_err'] = lon_err
known.loc[i, 'sq_err'] = sq_err
known.loc[i, 'dist_err'] = dist_err
known.to_csv('../Data/flocking_measure.csv', encoding='cp1252')
| gpl-2.0 |
devakumar/BSEDown | download_data.py | 1 | 14240 | import requests
import os
import pandas as pd
import zipfile
import numpy as np
import re
import sys
import pickle
data_from_date=raw_input("From date (eg 2016-10-20):")#"2016-10-01"
nse_op_path=os.path.join("NSE","unadjusted_data","all_data","op.data")
nse_hi_path=os.path.join("NSE","unadjusted_data","all_data","hi.data")
nse_lo_path=os.path.join("NSE","unadjusted_data","all_data","lo.data")
nse_cl_path=os.path.join("NSE","unadjusted_data","all_data","cl.data")
nse_vl_path=os.path.join("NSE","unadjusted_data","all_data","vl.data")
bse_op_path=os.path.join("BSE","unadjusted_data","all_data","op.data")
bse_hi_path=os.path.join("BSE","unadjusted_data","all_data","hi.data")
bse_lo_path=os.path.join("BSE","unadjusted_data","all_data","lo.data")
bse_cl_path=os.path.join("BSE","unadjusted_data","all_data","cl.data")
bse_vl_path=os.path.join("BSE","unadjusted_data","all_data","vl.data")
nse_ad_op_path=os.path.join("NSE","adjusted_data","all_data","op.data")
nse_ad_hi_path=os.path.join("NSE","adjusted_data","all_data","hi.data")
nse_ad_lo_path=os.path.join("NSE","adjusted_data","all_data","lo.data")
nse_ad_cl_path=os.path.join("NSE","adjusted_data","all_data","cl.data")
nse_ad_vl_path=os.path.join("NSE","adjusted_data","all_data","vl.data")
bse_ad_op_path=os.path.join("BSE","adjusted_data","all_data","op.data")
bse_ad_hi_path=os.path.join("BSE","adjusted_data","all_data","hi.data")
bse_ad_lo_path=os.path.join("BSE","adjusted_data","all_data","lo.data")
bse_ad_cl_path=os.path.join("BSE","adjusted_data","all_data","cl.data")
bse_ad_vl_path=os.path.join("BSE","adjusted_data","all_data","vl.data")
comp_scrip_csv=pd.read_csv("ListOfScrips.csv")
# checking for scrip shape
def check_for_new_companies():
last_shape_path="scripshape.data"
if os.path.exists(last_shape_path):
with open(last_shape_path,'rb') as f:
last_shape=pickle.load(f)
if comp_scrip_csv.shape[0]<last_shape[0]:
#print "Less Companies today"
#print "Not Possible"
sys.exit()
elif comp_scrip_csv.shape[0]>last_shape[0]:
#print comp_scrip_csv.shape[0]-last_shape[0], "Companies added"
last_shape=comp_scrip_csv.shape
with open(last_shape_path,'wb') as f:
pickle.dump(last_shape,f)
else:
print "No new companies added"
else:
with open(last_shape_path,'wb') as f:
pickle.dump(comp_scrip_csv.shape,f)
check_for_new_companies()
code_2_symbol=dict(zip(comp_scrip_csv['Security Code'],comp_scrip_csv['Security Id']))
symbol_2_name=dict(zip(comp_scrip_csv['Security Id'],comp_scrip_csv['Security Name']))
code_2_name=dict(zip(comp_scrip_csv['Security Code'],comp_scrip_csv['Security Name']))
nse_error=[]
bse_error=[]
if os.path.exists(nse_op_path) and os.path.exists(nse_hi_path) and os.path.exists(nse_lo_path) and \
os.path.exists(nse_cl_path) and os.path.exists(nse_vl_path):
op_nse=pd.read_pickle(nse_op_path)
hi_nse=pd.read_pickle(nse_hi_path)
lo_nse=pd.read_pickle(nse_lo_path)
cl_nse=pd.read_pickle(nse_cl_path)
vl_nse=pd.read_pickle(nse_vl_path)
else:
op_nse=pd.DataFrame()
hi_nse=pd.DataFrame()
lo_nse=pd.DataFrame()
cl_nse=pd.DataFrame()
vl_nse=pd.DataFrame()
if os.path.exists(bse_op_path) and os.path.exists(bse_hi_path) and os.path.exists(bse_lo_path) and \
os.path.exists(bse_cl_path) and os.path.exists(bse_vl_path):
op_bse=pd.read_pickle(bse_op_path)
hi_bse=pd.read_pickle(bse_hi_path)
lo_bse=pd.read_pickle(bse_lo_path)
cl_bse=pd.read_pickle(bse_cl_path)
vl_bse=pd.read_pickle(bse_vl_path)
else:
op_bse=pd.DataFrame()
hi_bse=pd.DataFrame()
lo_bse=pd.DataFrame()
cl_bse=pd.DataFrame()
vl_bse=pd.DataFrame()
def gen_today_date(starting_from):
start_date=starting_from
now=pd.datetime.now()
updation_time=pd.datetime(now.year,now.month,now.day,21)
if now<updation_time:
return pd.date_range(start_date,str(pd.datetime.now().date()))[:-1]
elif now>=updation_time:
return pd.date_range(start_date,str(pd.datetime.now().date()))
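# Illustrative note (added; not part of the original script): because the
# bhavcopy for a given day is assumed to be published at 21:00, a run at
# e.g. 2016-10-21 09:00 yields dates up to 2016-10-20 only, while a run
# after 21:00 on the 21st also includes 2016-10-21 itself.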
dates=gen_today_date(data_from_date)
def convert_to_nse_date(date):
""" Takes pandas datetime and returns NSE url extension for bhavcopies """
months=['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC']
month_dict=dict(zip(range(1,13),months))
sep='/'
t_str="cm"+"%02d"%date.day+month_dict[date.month]+str(date.year)+"bhav.csv.zip"
attach=sep.join([str(date.year),month_dict[date.month],t_str])
rest_url="http://www.nseindia.com/content/historical/EQUITIES/"
data_url=rest_url+attach
f_name="%02d"%date.day+month_dict[date.month]+str(date.year)+".zip"
return data_url,f_name
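# Worked example (added for illustration; values follow directly from the
# string formatting above, not from a live download):
# >>> convert_to_nse_date(pd.Timestamp('2016-10-20'))
# ('http://www.nseindia.com/content/historical/EQUITIES/2016/OCT/cm20OCT2016bhav.csv.zip',
#  '20OCT2016.zip')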
def download_nse_data():
success=0
error=[]
for items in dates:
try:
url,name=convert_to_nse_date(items)
if not os.path.exists(os.path.join("NSE","unadjusted_zip",name)):
r=requests.get(url)
f=open(os.path.join("NSE","unadjusted_zip",name),'wb')
f.write(r.content)
f.close()
success=success+1
#print success,"downloaded"
except:
error.append(name)
#print "Error: ",name
#print "NSE: ",len(dates),success,len(error)
def convert_to_bse_date(date):
months=['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC']
month_dict=dict(zip(range(1,13),months))
base="http://www.bseindia.com/download/BhavCopy/Equity/eq{0}_csv.zip"
temp=str(date)
insrt=temp[8:10]+temp[5:7]+temp[2:4]
data_url=base.format(insrt)
f_name="%02d"%date.day+month_dict[date.month]+str(date.year)+".zip"
return data_url,f_name
def download_bse_data():
success=0
error=[]
#loc = os.path.abspath('.')
for items in dates[::1]:
try:
url,name=convert_to_bse_date(items)
if not os.path.exists(os.path.join("BSE","unadjusted_zip",name)):
r=requests.get(url)
f=open(os.path.join("BSE","unadjusted_zip",name),'wb')
f.write(r.content)
f.close()
success=success+1
print success,"downloaded"
except Exception as e:
error.append(name)
print "Error: ",name, e
print "BSE: ",len(dates),success,len(error)
def ToDateTime(day_string):
name_month=['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC']
num_months=["%02d"%i for i in xrange(1,13)]
month_to_num_dict=dict(zip(name_month,num_months))
day=day_string[:2]
month=month_to_num_dict[day_string[2:5]]
year=day_string[5:9]
return pd.to_datetime("%s-%s-%s"%(year,month,day))
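# Worked example (added for illustration): the file names built above look
# like "20OCT2016.zip", which slices into day "20", month "OCT", year "2016":
# >>> ToDateTime("20OCT2016.zip")
# Timestamp('2016-10-20 00:00:00')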
def wrangle(key,value,frame,day):
dct=dict(zip(key,value))
temp=pd.DataFrame(dct,index=[ToDateTime(day)])
frame=pd.concat([frame,temp],axis=0)
return frame
def get_required_fields_dict(mydict,data):
converted=[]
use=[]
for i in xrange(len(data)):
try:
converted.append(mydict[data.ix[i]])
use.append(i)
except:
pass
return converted,use
def unprocessed_zip_list(path):
temp_file_names=os.listdir(path)
if "NSE" in path:
if op_nse.empty:
#print "All files: ",len(temp_file_names)
latest_update_date=pd.to_datetime(data_from_date)
else:
latest_update_date=max(op_nse.index)
temp_dates=pd.date_range(str(latest_update_date.date()),str(pd.datetime.now().date()))[1:]
selection_list=[]
for items in temp_dates:
unused_url,f_name=convert_to_nse_date(items)
selection_list.append(f_name)
final_list=[]
for filenames in selection_list:
if filenames in temp_file_names:
final_list.append(filenames)
#print final_list
return final_list
elif "BSE" in path:
if op_bse.empty:
#print "All files: ",len(temp_file_names)
latest_update_date=pd.to_datetime(data_from_date)
else:
latest_update_date=max(op_bse.index)
temp_dates=pd.date_range(str(latest_update_date.date()),str(pd.datetime.now().date()))[1:]
selection_list=[]
for items in temp_dates:
unused_url,f_name=convert_to_bse_date(items)
selection_list.append(f_name)
final_list=[]
for filenames in selection_list:
if filenames in temp_file_names:
final_list.append(filenames)
#print final_list
return final_list
def process_nse_zip():
global op_nse,hi_nse,lo_nse,cl_nse,vl_nse
count=0
success=0
file_names=unprocessed_zip_list(os.path.join("NSE","unadjusted_zip"))
for files in file_names:
try:
f=open(os.path.join("NSE","unadjusted_zip",files),'rb')
z=zipfile.ZipFile(f)
d=pd.read_csv(z.open(z.filelist[0]))
f.close()
op_nse=wrangle(np.array(d['SYMBOL'],dtype=str),d['OPEN'],op_nse,files)
hi_nse=wrangle(np.array(d['SYMBOL'],dtype=str),d['HIGH'],hi_nse,files)
lo_nse=wrangle(np.array(d['SYMBOL'],dtype=str),d['LOW'],lo_nse,files)
cl_nse=wrangle(np.array(d['SYMBOL'],dtype=str),d['CLOSE'],cl_nse,files)
vl_nse=wrangle(np.array(d['SYMBOL'],dtype=str),d['TOTTRDQTY'],vl_nse,files)
success=success+1
except:
nse_error.append(files)
count=count+1
op_nse.to_pickle(nse_op_path)
hi_nse.to_pickle(nse_hi_path)
lo_nse.to_pickle(nse_lo_path)
cl_nse.to_pickle(nse_cl_path)
vl_nse.to_pickle(nse_vl_path)
#print "NSE successful:",success
#print "NSE fialed:",count
def process_bse_zip():
global op_bse,hi_bse,lo_bse,cl_bse,vl_bse,splits
count=0
success=0
file_names=unprocessed_zip_list(os.path.join("BSE","unadjusted_zip"))
for files in file_names:
try:
f=open(os.path.join("BSE","unadjusted_zip",files),'rb')
z=zipfile.ZipFile(f)
d=pd.read_csv(z.open(z.filelist[0]))
f.close()
symbols,useful=get_required_fields_dict(code_2_symbol,d['SC_CODE'])
op_bse=wrangle(symbols,d['OPEN'].ix[useful],op_bse,files)
hi_bse=wrangle(symbols,d['HIGH'].ix[useful],hi_bse,files)
lo_bse=wrangle(symbols,d['LOW'].ix[useful],lo_bse,files)
cl_bse=wrangle(symbols,d['CLOSE'].ix[useful],cl_bse,files)
vl_bse=wrangle(symbols,d['NO_OF_SHRS'].ix[useful],vl_bse,files)
success=success+1
except:
bse_error.append(files)
count=count+1
op_bse.to_pickle(bse_op_path)
hi_bse.to_pickle(bse_hi_path)
lo_bse.to_pickle(bse_lo_path)
cl_bse.to_pickle(bse_cl_path)
vl_bse.to_pickle(bse_vl_path)
print "BSE successful:",success, files
#print "BSE fialed:",count
def process_splits():
def using_re(myarray):
old_new=[]
for items in myarray:
old_new.append(re.findall('\d+',items))
for i in xrange(len(old_new)):
if old_new[i]==[]:
old_new[i]=[1,1]
return np.array(old_new,dtype=float)
def gen_split_dates(mydatearray):
mydates=[]
for items in mydatearray:
#print items
params=items.upper().split(' ')
date_string="%02d%s%s"%(int(params[0]),params[1],params[2])
mydates.append(ToDateTime(date_string))
return mydates
split_data=pd.read_csv("Corporate_Actions.csv")
symbols,useful=get_required_fields_dict(code_2_symbol,split_data['Security Code'])
old_new_values=using_re(split_data['Purpose'].ix[useful])
split_dates=gen_split_dates(split_data['Ex Date'].ix[useful])
#print len(symbols),len(old_new_values),len(split_dates)
#print symbols[100],old_new_values[100],split_dates[100]
#print old_new_values.shape
ops_nse=op_nse.copy()
his_nse=hi_nse.copy()
los_nse=lo_nse.copy()
cls_nse=cl_nse.copy()
vls_nse=vl_nse.copy()
ops_bse=op_bse.copy()
his_bse=hi_bse.copy()
los_bse=lo_bse.copy()
cls_bse=cl_bse.copy()
vls_bse=vl_bse.copy()
for i in xrange(len(symbols)):
if symbols[i] in op_nse.columns:
ops_nse[symbols[i]].ix[ops_nse.index<split_dates[i]]=ops_nse[symbols[i]].ix[ops_nse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
his_nse[symbols[i]].ix[his_nse.index<split_dates[i]]=his_nse[symbols[i]].ix[his_nse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
los_nse[symbols[i]].ix[los_nse.index<split_dates[i]]=los_nse[symbols[i]].ix[los_nse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
cls_nse[symbols[i]].ix[cls_nse.index<split_dates[i]]=cls_nse[symbols[i]].ix[cls_nse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
if symbols[i] in op_bse.columns:
ops_bse[symbols[i]].ix[ops_bse.index<split_dates[i]]=ops_bse[symbols[i]].ix[ops_bse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
his_bse[symbols[i]].ix[his_bse.index<split_dates[i]]=his_bse[symbols[i]].ix[his_bse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
los_bse[symbols[i]].ix[los_bse.index<split_dates[i]]=los_bse[symbols[i]].ix[los_bse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
cls_bse[symbols[i]].ix[cls_bse.index<split_dates[i]]=cls_bse[symbols[i]].ix[cls_bse.index<split_dates[i]]/(old_new_values[i,0]/old_new_values[i,1])
ops_nse.to_pickle(nse_ad_op_path)
his_nse.to_pickle(nse_ad_hi_path)
los_nse.to_pickle(nse_ad_lo_path)
cls_nse.to_pickle(nse_ad_cl_path)
vls_nse.to_pickle(nse_ad_vl_path)
ops_bse.to_pickle(bse_ad_op_path)
his_bse.to_pickle(bse_ad_hi_path)
los_bse.to_pickle(bse_ad_lo_path)
cls_bse.to_pickle(bse_ad_cl_path)
vls_bse.to_pickle(bse_ad_vl_path)
if __name__=='__main__':
download_bse_data()
download_nse_data()
process_bse_zip()
process_nse_zip()
process_splits()
| mit |
rsignell-usgs/python-training | web-services/xray_test_forecast_model.py | 1 | 1384 |
# coding: utf-8
# #Testing Xray on weather forecast model data
# In[21]:
import xray
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
get_ipython().magic(u'matplotlib inline')
# In[22]:
URL = 'http://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg/Best'
# In[23]:
ds = xray.open_dataset(URL)
# In[24]:
ds
# In[25]:
# select lat,lon region of interest
# note: slice(20.5,55.0) fails
dsloc = ds.sel(lon=slice(230.5,300.0),lat=slice(55.0,20.5))
# In[26]:
# select closest data to time of interest
#date = datetime.datetime(2015,7,15,3,0,0)
date = datetime.datetime.now()
ds_snapshot = dsloc.sel(time=date,time1=date,time2=date,method='nearest')
# In[27]:
ds.data_vars
# In[28]:
ds.coords
# In[29]:
ds.attrs
# In[30]:
t = ds_snapshot['Temperature_surface']
# In[40]:
t.time.values
# In[32]:
plt.pcolormesh(t.lon.data,t.lat.data,t.data)
plt.title(t.name+pd.Timestamp(t.time.values).strftime(': %Y-%m-%d %H:%M:%S %Z %z'));
# In[33]:
# time series closest to specified lon,lat location
ds_series = ds.sel(lon=250.,lat=33.,method='nearest')
# In[34]:
# Select temperature and convert to Pandas Series
v_series = ds_series['Temperature_surface'].to_series()
# In[35]:
v_series.plot(title=v_series.name);
# In[36]:
ds_snapshot
# In[37]:
#ds_snapshot.to_netcdf('ds_snapshot.nc')
# In[ ]:
| cc0-1.0 |
boland1992/SeisSuite | seissuite/sort_later/full_test.py | 2 | 8253 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 12:28:32 2015
@author: boland
"""
import sys
sys.path.append('/home/boland/Anaconda/lib/python2.7/site-packages')
import pickle
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
import multiprocessing as mp
import pyproj
import os
import itertools
import datetime
import pointshape as ps
from math import sqrt, atan2, radians,degrees, cos, tan, sin, asin
shape_path = "/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
N = 130
#enter km spacing between path density points
km_points = 20.0
# reference elipsoid to calculate distance
wgs84 = pyproj.Geod(ellps='WGS84')
nbins = 200
def haversine(coordinates):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1, lat1, lon2, lat2= coordinates[0],coordinates[1],\
coordinates[2],coordinates[3]
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
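# Usage sketch (added; not part of the original module). The argument is a
# flat [lon1, lat1, lon2, lat2] list in decimal degrees; the coordinates and
# the quoted distance below are approximate and purely illustrative:
# >>> haversine([144.96, -37.81, 151.21, -33.87]) # Melbourne -> Sydney
# roughly 710 km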
def haversine2(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
def geodesic(coord1, coord2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=coord1[0], lat1=coord1[1],
lon2=coord2[0], lat2=coord2[1],
npts=npts-2)
return np.array([coord1] + path + [coord2])
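# Usage sketch (added; not part of the original module): five points along
# the WGS84 geodesic between two (lon, lat) pairs, endpoints included.
# >>> pts = geodesic([144.96, -37.81], [151.21, -33.87], npts=5)
# >>> pts.shape
# (5, 2)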
def new_geodesic(lon1,lat1,lon2,lat2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def cluster_points(coord_points):
"""
Function that returns k, an nx2 matrix of lon-lat vector columns
containing the optimal cluster centroid coordinates for a large set of
random points, e.g. those produced by the points_in_shape() function above.
"""
k = kmeans(coord_points, N)
return k[0]
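# Usage sketch (added; not part of the original module): collapse a dense
# cloud of (lon, lat) samples to centroid coordinates; scipy's kmeans returns
# (codebook, distortion) and only the codebook is kept here.
# >>> centroids = cluster_points(np.random.rand(5000, 2))
# >>> centroids.shape # approximately (N, 2), one row per centroid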
def new_paths(path_info, km=km_points):
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
#lon1, lat1, lon2, lat2, dist = path_info[0], \
#path_info[1], path_info[2], path_info[3], \
#path_info[4]
dist = haversine2(lon1, lat1, lon2, lat2)
# interpoint distance <= 1 km, and nb of points >= 100
npts = max(int((np.ceil(dist) + 1)/km), 100)
path = new_geodesic(lon1,lat1,lon2,lat2, npts)
#print("still going strong\n")
return path
def HIST2D(nbins,paths, grad=False):
H, xedges, yedges = np.histogram2d(paths[:,0],paths[:,1],bins=nbins)
#name = "path_density_2Dhist.png"
if grad:
H = np.abs(np.asarray(np.gradient(H)[0]))#name = "path_density_2Dhist_grad.png"
# H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
return Hmasked
#fig = plt.figure()
#plt.pcolormesh(xedges,yedges,Hmasked)
#plt.xlabel('longitude (degrees)')
#plt.ylabel('longitude (degrees)')
#cbar = plt.colorbar()
#cbar.ax.set_ylabel('Counts')
#fig.savefig(name)
def latitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lat = degrees(asin(cos(alpha0)*sin(sigma)))
#alpha = atan2(tan(alpha0),cos(sigma))
return lat
def longitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lon = degrees(atan2(sin(alpha0)*sin(sigma), cos(sigma))) + degrees(lon0)
#alpha = atan2(tan(alpha0),cos(sigma))
return lon
vlat_func = np.vectorize(latitude)
vlon_func = np.vectorize(longitude)
def waypoint_init(path_info, km=km_points):
R = 6371
lon1, lat1, lon2, lat2, dist = radians(path_info[0]), \
radians(path_info[1]), radians(path_info[2]), \
radians(path_info[3]), radians(path_info[4])
#lon1, lat1, lon2, lat2, dist = map(radians, [path_info[0],path_info[1],path_info[2],path_info[3],path_info[4]])
lon_diff = lon2-lon1
alpha1 = atan2(sin(lon_diff),(cos(lat1)*tan(lat2)-sin(lat1)*cos(lon_diff)))
#alpha2 = atan2(sin(lon_diff),(-cos(lat2)*tan(lat1)+sin(lat2)*cos(lon_diff)))
#try:
#sigma12 = acos(sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(lon_diff))
#except:
#return
sigma01, alpha0 = atan2(tan(lat1), cos(alpha1)), asin(sin(alpha1)*cos(lat1))
#sigma02 = sigma01+sigma12
lon01 = atan2(sin(alpha0)*sin(sigma01), cos(sigma01))
lon0 = lon1 - lon01
npts = max(int((np.ceil(dist) + 1)/km), 100)
all_d = np.linspace(0,dist,npts)/R
lons, lats = vlon_func(all_d, sigma01, alpha0, lon0), vlat_func(all_d, sigma01, alpha0, lon0)
return np.column_stack((lons, lats))
t_total0 = datetime.datetime.now()
number=0
while number < 50:
t0 = datetime.datetime.now()
#lat lon coordinates of random points generated within set shape file
coords = ps.points_in_shape(shape_path, N)
#lons1,lats1 = coords[:,0], coords[:,1]
#lons2,lats2 = lons1,lats1
lonmin = np.floor(min(coords[:,0]))
latmin = np.floor(min(coords[:,1]))
coords1 = [coord1 for coord1 in coords for coord2 in coords]
coords2 = [coord2 for coord1 in coords for coord2 in coords]
columns = np.column_stack((coords1, coords2))
#dists = map(haversine, columns)
#path_info = zip(coords1,coords2, dists)
#path_info = np.column_stack((coords1, coords2, dists))
#parallise the generation of path points for SPEED!
#path_info = np.column_stack((coords1, coords2, dists))
pool = mp.Pool()
paths = pool.map(new_paths, columns)
pool.close()
pool.join()
#create a flattened numpy array of size 2xN from the paths created!
paths = np.asarray(list(itertools.chain(*paths)))
#keep all but the repeated coordinates by keeping only unique whole rows!
#this step is slower without the contiguous array view b
b = np.ascontiguousarray(paths).view(np.dtype((np.void, paths.dtype.itemsize * paths.shape[1])))
_, idx = np.unique(b, return_index=True)
paths = np.unique(b).view(paths.dtype).reshape(-1, paths.shape[1])
# Estimate the 2D histogram
H, xedges, yedges = np.histogram2d(paths[:,0],paths[:,1],bins=nbins)
#name = "path_density_2Dhist.png"
GRAD = np.abs(np.asarray(np.gradient(H)[0]))#name = "path_density_2Dhist_grad.png"
# H needs to be rotated and flipped
H = np.rot90(H)
GRAD = np.rot90(GRAD)
H = np.flipud(H)
GRAD = np.flipud(GRAD)
# Mask zeros
H = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
GRAD = np.ma.masked_where(GRAD==0,GRAD) # Mask pixels with a value of zero
t1 = datetime.datetime.now()
print t1-t0
#fig = plt.figure()
#plt.pcolormesh(xedges,yedges,H)
#plt.xlabel('longitude (degrees)')
#plt.ylabel('longitude (degrees)')
#cbar = plt.colorbar()
#cbar.ax.set_ylabel('Counts')
#fig.savefig("SAVE_{}.png".format(number))
number+=1
t_total1 = datetime.datetime.now()
print t_total1-t_total0 | gpl-3.0 |
akrherz/idep | scripts/biomass/yearly.py | 2 | 2141 | """Make plots of yearly values or differences"""
from __future__ import print_function
import calendar
from pandas.io.sql import read_sql
import matplotlib.pyplot as plt
from pyiem.util import get_dbconn
PGCONN = get_dbconn("idep")
def get_scenario(scenario):
df = read_sql(
"""
WITH yearly as (
SELECT huc_12, generate_series(2008, 2016) as yr
from huc12 where states = 'IA' and scenario = 0),
results as (
SELECT r.huc_12, extract(year from valid)::int as yr,
sum(qc_precip) as precip, sum(avg_runoff) as runoff,
sum(avg_delivery) as delivery,
sum(avg_loss) as detachment from results_by_huc12 r
WHERE r.scenario = %s and r.valid >= '2008-01-01'
and r.valid < '2017-01-01' GROUP by r.huc_12, yr),
agg as (
SELECT c.huc_12, c.yr, coalesce(r.precip, 0) as precip,
coalesce(r.runoff, 0) as runoff,
coalesce(r.delivery, 0) as delivery,
coalesce(r.detachment, 0) as detachment
from yearly c LEFT JOIN results r on (c.huc_12 = r.huc_12 and
c.yr = r.yr))
select yr,
avg(runoff) / 25.4 as runoff_in,
avg(delivery) * 4.463 as delivery_ta,
avg(detachment) * 4.463 as detachment_ta
from agg GROUP by yr ORDER by yr ASC
""",
PGCONN,
params=(scenario,),
index_col="yr",
)
return df
def main():
"""Go Main"""
adf = get_scenario(0)
b25 = get_scenario(25)
b26 = get_scenario(26)
delta25 = b25 - adf
delta26 = b26 - adf
(fig, ax) = plt.subplots(1, 1)
ax.bar(
delta25.index.values - 0.2,
delta25["delivery_ta"].values,
width=0.4,
label="HI 0.8",
)
ax.bar(
delta26.index.values + 0.2,
delta26["delivery_ta"].values,
width=0.4,
label="HI 0.9",
)
ax.legend(loc="best")
ax.grid(True)
ax.set_title("2008-2016 Change in Delivery vs DEP Baseline")
ax.set_ylabel("Change [tons/acre]")
fig.savefig("test.png")
if __name__ == "__main__":
main()
| mit |
marionleborgne/nupic.research | projects/capybara/sandbox/classification/plot_raw_sensortag_data.py | 9 | 2428 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import os
import matplotlib.pyplot as plt
from settings.acc_data import (INPUT_FILES,
METRICS,
DATA_DIR)
numRecordsToPlot = 500
plt.figure(figsize=(20, 13))
for inputFile in INPUT_FILES:
filePath = os.path.join(DATA_DIR, inputFile)
with open(filePath, 'rU') as f:
reader = csv.reader(f)
headers = reader.next()
reader.next()
t = []
x = []
y = []
z = []
for i, values in enumerate(reader):
record = dict(zip(headers, values))
try:
for metric in METRICS:
float(record[metric])
x.append(float(record['x']))
y.append(float(record['y']))
z.append(float(record['z']))
t.append(i)
except ValueError:
print "Not possible to convert some values of %s to a float" % record
if i > numRecordsToPlot:
break
subplot_index = INPUT_FILES.index(inputFile)
ax = plt.subplot(4, 1, subplot_index + 1)
ax.plot(t, x, 'r', label='x')
ax.plot(t, y, 'b', label='y')
ax.plot(t, z, 'g', label='z')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.tight_layout()
plt.title(inputFile)
plt.xlim([0, numRecordsToPlot])
plt.ylim([-8, 8])
plt.xlabel('timestep')
plt.ylabel('accelerometer')
plt.grid()
plt.savefig('%s.png' % inputFile[:-4])
plt.show()
| agpl-3.0 |
Haunter17/MIR_SU17 | exp8/exp8g_batchnorm.py | 1 | 7515 | import numpy as np
import tensorflow as tf
import h5py
from sklearn.preprocessing import OneHotEncoder
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import scipy.io
# Functions for initializing neural nets parameters
def weight_variable(shape, var_name):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64)
return tf.Variable(initial, name=var_name)
def bias_variable(shape, var_name):
initial = tf.constant(0.1, shape=shape, dtype=tf.float64)
return tf.Variable(initial, name=var_name)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
def batch_nm(x, eps=1e-5):
# batch normalization to have zero mean and unit variance
mu, var = tf.nn.moments(x, [0])
return tf.nn.batch_normalization(x, mu, var, None, None, eps)
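# Note (added for illustration): batch_nm is intended to wrap layer
# activations, e.g. h1 = batch_nm(tf.nn.relu(tf.matmul(x, W1) + b1)) as done
# below, normalising each mini-batch to zero mean and unit variance per unit.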
# Download data from .mat file into numpy array
print('==> Experiment 8g - with batchnorm')
filepath = '/scratch/ttanpras/exp8a_d7_1s.mat'
print('==> Loading data from {}'.format(filepath))
f = h5py.File(filepath)
data_train = np.array(f.get('trainingFeatures'))
data_val = np.array(f.get('validationFeatures'))
del f
print('==> Data sizes:',data_train.shape, data_val.shape)
# Transform labels into on-hot encoding form
enc = OneHotEncoder(n_values = 71)
'''
NN config parameters
'''
sub_window_size = 32
num_features = 169*sub_window_size
num_frames = 32
hidden_layer_size = 2000
num_bits = 2000
num_classes = 71
print("Number of features:", num_features)
print("Number of songs:",num_classes)
# Reshape input features
X_train = np.reshape(data_train,(-1, num_features))
X_val = np.reshape(data_val,(-1, num_features))
print("Input sizes:", X_train.shape, X_val.shape)
y_train = []
y_val = []
# Add Labels
for label in range(num_classes):
for sampleCount in range(X_train.shape[0]//num_classes):
y_train.append([label])
for sampleCount in range(X_val.shape[0]//num_classes):
y_val.append([label])
X_train = np.concatenate((X_train, y_train), axis=1)
X_val = np.concatenate((X_val, y_val), axis=1)
# Shuffle
np.random.shuffle(X_train)
np.random.shuffle(X_val)
# Separate coefficients and labels
y_train = X_train[:, -1].reshape(-1, 1)
X_train = X_train[:, :-1]
y_val = X_val[:, -1].reshape(-1, 1)
X_val = X_val[:, :-1]
print('==> Data sizes:',X_train.shape, y_train.shape,X_val.shape, y_val.shape)
y_train = enc.fit_transform(y_train.copy()).astype(int).toarray()
y_val = enc.fit_transform(y_val.copy()).astype(int).toarray()
plotx = []
ploty_train = []
ploty_val = []
# Set-up NN layers
x = tf.placeholder(tf.float64, [None, num_features])
W1 = weight_variable([num_features, hidden_layer_size], "W1")
b1 = bias_variable([hidden_layer_size], "b1")
OpW1 = tf.placeholder(tf.float64, [num_features, hidden_layer_size])
Opb1 = tf.placeholder(tf.float64, [hidden_layer_size])
# Hidden layer activation function: ReLU
h1 = batch_nm(tf.nn.relu(tf.matmul(x, W1) + b1))
W2 = weight_variable([hidden_layer_size, num_bits], "W2")
b2 = bias_variable([num_bits], "b2")
OpW2 = tf.placeholder(tf.float64, [hidden_layer_size, num_bits])
Opb2 = tf.placeholder(tf.float64, [num_bits])
# Pre-activation value for bit representation
h = tf.matmul(h1, W2) + b2
h2 = batch_nm(tf.nn.relu(tf.matmul(h1, W2) + b2))
W3 = weight_variable([num_bits, num_classes], "W3")
b3 = bias_variable([num_classes], "b3")
OpW3 = tf.placeholder(tf.float64, [num_bits, num_classes])
Opb3 = tf.placeholder(tf.float64, [num_classes])
# Softmax layer (Output), dtype = float64
y = tf.matmul(h2, W3) + b3
# NN desired value (labels)
y_ = tf.placeholder(tf.float64, [None, num_classes])
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.InteractiveSession()
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
sess.run(tf.initialize_all_variables())
# Training
numTrainingVec = len(X_train)
batchSize = 500
numEpochs = 1000
bestValErr = 10000
bestValEpoch = 0
startTime = time.time()
for epoch in range(numEpochs):
for i in range(0,numTrainingVec,batchSize):
# Batch Data
batchEndPoint = min(i+batchSize, numTrainingVec)
trainBatchData = X_train[i:batchEndPoint]
trainBatchLabel = y_train[i:batchEndPoint]
train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel})
# Print accuracy
if epoch % 5 == 0 or epoch == numEpochs-1:
plotx.append(epoch)
train_error = cross_entropy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
train_acc = accuracy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
val_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val})
val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val})
ploty_train.append(train_error)
ploty_val.append(val_error)
print("epoch: %d, val error %g, train error %g"%(epoch, val_error, train_error))
if val_error < bestValErr:
bestValErr = val_error
bestValEpoch = epoch
OpW1 = W1
Opb1 = b1
OpW2 = W2
Opb2 = b2
OpW3 = W3
Opb3 = b3
endTime = time.time()
print("Elapse Time:", endTime - startTime)
print("Best validation error: %g at epoch %d"%(bestValErr, bestValEpoch))
# Restore best model for early stopping
W1 = OpW1
b1 = Opb1
W2 = OpW2
b2 = Opb2
W3 = OpW3
b3 = Opb3
print('==> Generating error plot...')
errfig = plt.figure()
trainErrPlot = errfig.add_subplot(111)
trainErrPlot.set_xlabel('Number of Epochs')
trainErrPlot.set_ylabel('Cross-Entropy Error')
trainErrPlot.set_title('Error vs Number of Epochs')
trainErrPlot.scatter(plotx, ploty_train)
valErrPlot = errfig.add_subplot(111)
valErrPlot.scatter(plotx, ploty_val)
errfig.savefig('exp8g_batchnorm.png')
'''
GENERATING REPRESENTATION OF NOISY FILES
'''
namelist = ['orig','comp5','comp10','str5','str10','ampSat_(-15)','ampSat_(-10)','ampSat_(-5)', \
'ampSat_(5)','ampSat_(10)','ampSat_(15)','pitchShift_(-1)','pitchShift_(-0.5)', \
'pitchShift_(0.5)','pitchShift_(1)','rev_dkw','rev_gal','rev_shan0','rev_shan1', \
'rev_gen','crowd-15','crowd-10','crowd-5','crowd0','crowd5','crowd10','crowd15', \
'crowd100','rest-15','rest-10','rest-5','rest0','rest5','rest10','rest15', \
'rest100','AWGN-15','AWGN-10','AWGN-5','AWGN0','AWGN5','AWGN10','AWGN15', 'AWGN100']
outdir = '/scratch/ttanpras/taylorswift_noisy_processed/'
repDict = {}
# Loop over each CQT files, not shuffled
for count in range(len(namelist)):
name = namelist[count]
filename = outdir + name + '.mat'
cqt = scipy.io.loadmat(filename)['Q']
cqt = np.transpose(np.array(cqt))
# Group into windows of 32 without overlapping
# Discard any leftover frames
num_windows = cqt.shape[0] // 32
cqt = cqt[:32*num_windows]
X = np.reshape(cqt,(num_windows, num_features))
# Feed window through model (Only 1 layer of weight w/o non-linearity)
rep = h.eval(feed_dict={x:X})
# Put the output representation into a dictionary
repDict['n'+str(count)] = rep
scipy.io.savemat('exp8g_batchnorm_repNon.mat',repDict) | mit |
cbmoore/statsmodels | statsmodels/stats/anova.py | 25 | 13433 | from statsmodels.compat.python import lrange, lmap
import numpy as np
from scipy import stats
from pandas import DataFrame, Index
from statsmodels.formula.formulatools import (_remove_intercept_patsy,
_has_intercept, _intercept_idx)
def _get_covariance(model, robust):
if robust is None:
return model.cov_params()
elif robust == "hc0":
se = model.HC0_se
return model.cov_HC0
elif robust == "hc1":
se = model.HC1_se
return model.cov_HC1
elif robust == "hc2":
se = model.HC2_se
return model.cov_HC2
elif robust == "hc3":
se = model.HC3_se
return model.cov_HC3
else: # pragma: no cover
raise ValueError("robust options %s not understood" % robust)
#NOTE: these need to take into account weights !
def anova_single(model, **kwargs):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
typ : int or str {1,2,3} or {"I","II","III"}
Type of sum of squares to use.
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
typ = kwargs.get("typ", 1)
robust = kwargs.get("robust", None)
if robust:
robust = robust.lower()
endog = model.model.endog
exog = model.model.exog
nobs = exog.shape[0]
response_name = model.model.endog_names
design_info = model.model.data.design_info
exog_names = model.model.exog_names
# +1 for resids
n_rows = (len(design_info.terms) - _has_intercept(design_info) + 1)
pr_test = "PR(>%s)" % test
names = ['df', 'sum_sq', 'mean_sq', test, pr_test]
table = DataFrame(np.zeros((n_rows, 5)), columns = names)
if typ in [1,"I"]:
return anova1_lm_single(model, endog, exog, nobs, design_info, table,
n_rows, test, pr_test, robust)
elif typ in [2, "II"]:
return anova2_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [3, "III"]:
return anova3_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [4, "IV"]:
raise NotImplementedError("Type IV not yet implemented")
else: # pragma: no cover
raise ValueError("Type %s not understood" % str(typ))
def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,
pr_test, robust):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
#maybe we should rethink using pinv > qr in OLS/linear models?
effects = getattr(model, 'effects', None)
if effects is None:
q,r = np.linalg.qr(exog)
effects = np.dot(q.T, endog)
arr = np.zeros((len(design_info.terms), len(design_info.column_names)))
slices = [design_info.slice(name) for name in design_info.term_names]
for i,slice_ in enumerate(slices):
arr[i, slice_] = 1
sum_sq = np.dot(arr, effects**2)
#NOTE: assumes intercept is first column
idx = _intercept_idx(design_info)
sum_sq = sum_sq[~idx]
term_names = np.array(design_info.term_names) # want boolean indexing
term_names = term_names[~idx]
index = term_names.tolist()
table.index = Index(index + ['Residual'])
table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]
if test == 'F':
table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/
(model.ssr/model.df_resid))
table.ix[:n_rows, pr_test] = stats.f.sf(table["F"], table["df"],
model.df_resid)
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
table['mean_sq'] = table['sum_sq'] / table['df']
return table
#NOTE: the below is not agnostic about formula...
def anova2_lm_single(model, design_info, n_rows, test, pr_test, robust):
"""
ANOVA type II table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
Type II
Sum of Squares compares marginal contribution of terms. Thus, it is
not particularly useful for models with significant interaction terms.
"""
terms_info = design_info.terms[:] # copy
terms_info = _remove_intercept_patsy(terms_info)
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, None)
robust_cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab all variables except interaction effects that contain term
# need two hypotheses matrices L1 is most restrictive, ie., term==0
# L2 is everything except term==0
cols = design_info.slice(term)
L1 = lrange(cols.start, cols.stop)
L2 = []
term_set = set(term.factors)
for t in terms_info: # for the term you have
other_set = set(t.factors)
if term_set.issubset(other_set) and not term_set == other_set:
col = design_info.slice(t)
# on a higher order term containing current `term`
L1.extend(lrange(col.start, col.stop))
L2.extend(lrange(col.start, col.stop))
L1 = np.eye(model.model.exog.shape[1])[L1]
L2 = np.eye(model.model.exog.shape[1])[L2]
if L2.size:
LVL = np.dot(np.dot(L1,robust_cov),L2.T)
from scipy import linalg
orth_compl,_ = linalg.qr(LVL)
r = L1.shape[0] - L2.shape[0]
# L1|2
# use the non-unique orthogonal completion since L12 is rank r
L12 = np.dot(orth_compl[:,-r:].T, L1)
else:
L12 = L1
r = L1.shape[0]
#from IPython.core.debugger import Pdb; Pdb().set_trace()
if test == 'F':
f = model.f_test(L12, cov_p=robust_cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova3_lm_single(model, design_info, n_rows, test, pr_test, robust):
n_rows += _has_intercept(design_info)
terms_info = design_info.terms
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab term, hypothesis is that term == 0
cols = design_info.slice(term)
L1 = np.eye(model.model.exog.shape[1])[cols]
L12 = L1
r = L1.shape[0]
if test == 'F':
f = model.f_test(L12, cov_p=cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
#col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
#NOTE: Don't need to sort because terms are an ordered dict now
#table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova_lm(*args, **kwargs):
"""
ANOVA table for one or more fitted linear models.
Parameters
----------
args : fitted linear model results instance
One or more fitted linear models
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
typ : str or int {"I","II","III"} or {1,2,3}
The type of ANOVA test to perform. See notes.
robust : {None, "hc0", "hc1", "hc2", "hc3"}
Use heteroscedasticity-corrected coefficient covariance matrix.
If robust covariance is desired, it is recommended to use `hc3`.
Returns
-------
anova : DataFrame
A DataFrame containing the ANOVA table.
Notes
-----
Model statistics are given in the order of args. Models must have
been fit using the formula api.
See Also
--------
model_results.compare_f_test, model_results.compare_lm_test
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.formula.api import ols
>>> moore = sm.datasets.get_rdataset("Moore", "car",
... cache=True) # load data
>>> data = moore.data
>>> data = data.rename(columns={"partner.status" :
... "partner_status"}) # make name pythonic
>>> moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
... data=data).fit()
>>> table = sm.stats.anova_lm(moore_lm, typ=2) # Type 2 ANOVA DataFrame
>>> print table
"""
typ = kwargs.get('typ', 1)
### Farm Out Single model ANOVA Type I, II, III, and IV ###
if len(args) == 1:
model = args[0]
return anova_single(model, **kwargs)
try:
assert typ in [1,"I"]
except:
raise ValueError("Multiple models only supported for type I. "
"Got type %s" % str(typ))
### COMPUTE ANOVA TYPE I ###
# if given a single model
if len(args) == 1:
return anova_single(*args, **kwargs)
# received multiple fitted models
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
n_models = len(args)
model_formula = []
pr_test = "Pr(>%s)" % test
names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]
table = DataFrame(np.zeros((n_models, 6)), columns = names)
if not scale: # assume biggest model is last
scale = args[-1].scale
table["ssr"] = lmap(getattr, args, ["ssr"]*n_models)
table["df_resid"] = lmap(getattr, args, ["df_resid"]*n_models)
table.ix[1:, "df_diff"] = -np.diff(table["df_resid"].values)
table["ss_diff"] = -table["ssr"].diff()
if test == "F":
table["F"] = table["ss_diff"] / table["df_diff"] / scale
table[pr_test] = stats.f.sf(table["F"], table["df_diff"],
table["df_resid"])
# for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan
table[pr_test][table['F'].isnull()] = np.nan
return table
if __name__ == "__main__":
import pandas
from statsmodels.formula.api import ols
# in R
#library(car)
#write.csv(Moore, "moore.csv", row.names=FALSE)
moore = pandas.read_table('moore.csv', delimiter=",", skiprows=1,
names=['partner_status','conformity',
'fcategory','fscore'])
moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
data=moore).fit()
mooreB = ols('conformity ~ C(partner_status, Sum)', data=moore).fit()
# for each term you just want to test vs the model without its
# higher-order terms
# using Monette-Fox slides and Marden class notes for linear algebra /
# orthogonal complement
# https://netfiles.uiuc.edu/jimarden/www/Classes/STAT324/
table = anova_lm(moore_lm, typ=2)
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/frame/test_apply.py | 7 | 25966 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast(self):
broadcasted = self.frame.apply(np.mean, broadcast=True)
agged = self.frame.apply(np.mean)
for col, ts in compat.iteritems(broadcasted):
assert (ts == agged[col]).all()
broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
agged = self.frame.apply(np.mean, axis=1)
for idx in broadcasted.index:
assert (broadcasted.xs(idx) == agged[idx]).all()
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = self.mixed_frame._apply_standard(np.mean, 0,
ignore_failures=True)
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), broadcast=True)
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = DataFrame(np.tile(self.frame.index,
(len(self.frame.columns), 1)).T,
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
def test_apply_multi_index(self):
s = DataFrame([[1, 2], [3, 4], [5, 6]])
s.index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s.columns = ['col1', 'col2']
res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
assert isinstance(res.index, MultiIndex)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, reduce=True)
reduce_false = df.apply(fn, reduce=False)
reduce_none = df.apply(fn, reduce=None)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
# See gh-12244
def test_apply_non_numpy_dtype(self):
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def zip_frames(*frames):
"""
take a list of frames, zip the columns together for each
assume that these all have the first frame columns
return a new frame
"""
columns = frames[0].columns
zipped = [f[c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
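# Note on ordering (hypothetical frames, for illustration only): given frames
# f1 and f2 that both have columns ['A', 'B'], zip_frames(f1, f2) lays the
# result columns out as [f1['A'], f2['A'], f1['B'], f2['B']], i.e. grouped by
# column first and then by frame, which is the order the MultiIndex
# expectations in the transform/apply tests below rely on.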
class TestDataFrameAggregate(TestData):
_multiprocess_can_split_ = True
def test_agg_transform(self):
with np.errstate(all='ignore'):
f_sqrt = np.sqrt(self.frame)
f_abs = np.abs(self.frame)
# ufunc
result = self.frame.transform(np.sqrt)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt])
expected = f_sqrt.copy()
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt])
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
expected = zip_frames(f_sqrt, f_abs)
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt', 'absolute']])
result = self.frame.apply([np.sqrt, np.abs])
assert_frame_equal(result, expected)
result = self.frame.transform(['sqrt', np.abs])
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'])
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']})
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self):
# all reducers
expected = zip_frames(self.frame.mean().to_frame(),
self.frame.max().to_frame(),
self.frame.sum().to_frame()).T
expected.index = ['mean', 'max', 'sum']
result = self.frame.agg(['mean', 'max', 'sum'])
assert_frame_equal(result, expected)
# dict input with scalars
result = self.frame.agg({'A': 'mean', 'B': 'sum'})
expected = Series([self.frame.A.mean(), self.frame.B.sum()],
index=['A', 'B'])
assert_series_equal(result.reindex_like(expected), expected)
# dict input with lists
result = self.frame.agg({'A': ['mean'], 'B': ['sum']})
expected = DataFrame({'A': Series([self.frame.A.mean()],
index=['mean']),
'B': Series([self.frame.B.sum()],
index=['sum'])})
assert_frame_equal(result.reindex_like(expected), expected)
# dict input with lists with multiple
result = self.frame.agg({'A': ['mean', 'sum'],
'B': ['sum', 'max']})
expected = DataFrame({'A': Series([self.frame.A.mean(),
self.frame.A.sum()],
index=['mean', 'sum']),
'B': Series([self.frame.B.sum(),
self.frame.B.max()],
index=['sum', 'max'])})
assert_frame_equal(result.reindex_like(expected), expected)
def test_nuiscance_columns(self):
# GH 15015
df = DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
result = df.agg('min')
expected = Series([1, 1., 'bar', pd.Timestamp('20130101')],
index=df.columns)
assert_series_equal(result, expected)
result = df.agg(['min'])
expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]],
index=['min'], columns=df.columns)
assert_frame_equal(result, expected)
result = df.agg('sum')
expected = Series([6, 6., 'foobarbaz'],
index=['A', 'B', 'C'])
assert_series_equal(result, expected)
result = df.agg(['sum'])
expected = DataFrame([[6, 6., 'foobarbaz']],
index=['sum'], columns=['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_non_callable_aggregates(self):
# GH 16405
# 'size' is a property of frame/series
# validate that this is working
df = DataFrame({'A': [None, 2, 3],
'B': [1.0, np.nan, 3.0],
'C': ['foo', None, 'bar']})
# Function aggregate
result = df.agg({'A': 'count'})
expected = pd.Series({'A': 2})
assert_series_equal(result, expected)
# Non-function aggregate
result = df.agg({'A': 'size'})
expected = pd.Series({'A': 3})
assert_series_equal(result, expected)
# Mix function and non-function aggs
result1 = df.agg(['count', 'size'])
result2 = df.agg({'A': ['count', 'size'],
'B': ['count', 'size'],
'C': ['count', 'size']})
expected = pd.DataFrame({'A': {'count': 2, 'size': 3},
'B': {'count': 2, 'size': 3},
'C': {'count': 2, 'size': 3}})
assert_frame_equal(result1, result2, check_like=True)
assert_frame_equal(result2, expected, check_like=True)
# Just functional string arg is same as calling df.arg()
result = df.agg('count')
expected = df.count()
assert_series_equal(result, expected)
# Just a string attribute arg same as calling df.arg
result = df.agg('size')
expected = df.size
assert result == expected
| apache-2.0 |
alshedivat/tensorflow | tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py | 5 | 15018 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.estimator import WarmStartSettings
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
del mode
noise = noise_dict['x']
return layers.fully_connected(noise, tensor_shape.dimension_value(
noise.shape[1]))
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return layers.fully_connected(data, 1)
class GetGANModelTest(test.TestCase, parameterized.TestCase):
"""Tests that `GetGANModel` produces the correct model."""
@parameterized.named_parameters(
('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_gan_model(self, mode):
with ops.Graph().as_default():
generator_inputs = {'x': array_ops.ones([3, 4])}
real_data = (array_ops.zeros([3, 4]) if
mode != model_fn_lib.ModeKeys.PREDICT else None)
gan_model = estimator._get_gan_model(
mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries=False)
self.assertEqual(generator_inputs, gan_model.generator_inputs)
self.assertIsNotNone(gan_model.generated_data)
self.assertEqual(2, len(gan_model.generator_variables)) # 1 FC layer
self.assertIsNotNone(gan_model.generator_fn)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertIsNone(gan_model.real_data)
self.assertIsNone(gan_model.discriminator_real_outputs)
self.assertIsNone(gan_model.discriminator_gen_outputs)
self.assertIsNone(gan_model.discriminator_variables)
self.assertIsNone(gan_model.discriminator_scope)
self.assertIsNone(gan_model.discriminator_fn)
else:
self.assertIsNotNone(gan_model.real_data)
self.assertIsNotNone(gan_model.discriminator_real_outputs)
self.assertIsNotNone(gan_model.discriminator_gen_outputs)
self.assertEqual(2, len(gan_model.discriminator_variables)) # 1 FC layer
self.assertIsNotNone(gan_model.discriminator_scope)
self.assertIsNotNone(gan_model.discriminator_fn)
def get_dummy_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=array_ops.zeros([3, 4]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def dummy_loss_fn(gan_model, add_summaries=True):
return math_ops.reduce_sum(gan_model.discriminator_real_outputs -
gan_model.discriminator_gen_outputs)
def get_metrics(gan_model):
return {
'mse_custom_metric': metrics_lib.mean_squared_error(
gan_model.real_data, gan_model.generated_data)
}
class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)
@parameterized.named_parameters(
('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_estimator_spec(self, mode):
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
spec = estimator._get_estimator_spec(
mode,
self._gan_model,
generator_loss_fn=dummy_loss_fn,
discriminator_loss_fn=dummy_loss_fn,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=self._generator_optimizer,
discriminator_optimizer=self._discriminator_optimizer)
self.assertEqual(mode, spec.mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
elif mode == model_fn_lib.ModeKeys.TRAIN:
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
elif mode == model_fn_lib.ModeKeys.EVAL:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metric_ops)
# TODO(joelshor): Add pandas test.
class GANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
get_eval_metric_ops_fn=get_metrics,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', six.iterkeys(scores))
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim])
def test_numpy_input_fn_lrdecay(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim],
lr_decay=True)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dim = 4
batch_size = 6
data = np.zeros([batch_size, input_dim])
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(
serialized_examples, feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
prediction_size=[batch_size, input_dim])
class GANEstimatorWarmStartTest(test.TestCase):
def setUp(self):
self._model_dir = self.get_temp_dir()
self.new_variable_name = 'new_var'
self.new_variable_value = [1, 2, 3]
def tearDown(self):
writer_cache.FileWriterCache.clear()
def _test_warm_start(self, warm_start_from=None):
"""Tests whether WarmStartSettings work as intended."""
def generator_with_new_variable(noise_dict, mode):
variable_scope.get_variable(name=self.new_variable_name,
initializer=self.new_variable_value,
trainable=True)
return generator_fn(noise_dict, mode)
def train_input_fn():
data = np.zeros([3, 4])
return {'x': data}, data
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
model_dir=self._model_dir)
est.train(train_input_fn, steps=1)
est_warm = estimator.GANEstimator(
generator_fn=generator_with_new_variable,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
model_dir=None if warm_start_from else self._model_dir,
warm_start_from=warm_start_from)
est_warm.train(train_input_fn, steps=1)
return est_warm
def test_warm_start_error(self):
"""Test if exception when reloading different estimators."""
with self.assertRaises(NotFoundError):
self._test_warm_start()
def test_warm_start_success(self):
"""Test if GANEstimator allows explicit warm start variable assignment."""
# Regex matches all variable names in ckpt except for new_var.
var_regex = '^(?!.*%s.*)' % self.new_variable_name
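    # For illustration (hypothetical variable names): with new_variable_name
    # equal to 'new_var', the pattern '^(?!.*new_var.*)' is a negative
    # lookahead, so it matches e.g. 'Generator/fully_connected/weights' but
    # not 'Generator/new_var', leaving the new variable to its initializer.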
warmstart = WarmStartSettings(ckpt_to_initialize_from=self._model_dir,
vars_to_warm_start=var_regex)
est_warm = self._test_warm_start(warm_start_from=warmstart)
full_variable_name = 'Generator/%s' % self.new_variable_name
self.assertIn(full_variable_name, est_warm.get_variable_names())
equal_vals = np.array_equal(est_warm.get_variable_value(full_variable_name),
self.new_variable_value)
self.assertTrue(equal_vals)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ishank08/scikit-learn | examples/text/document_clustering.py | 32 | 8526 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
  frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random initializations
might be necessary to get good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
TrackDR/dx | dx/dx_valuation.py | 5 | 49297 | #
# DX Analytics
# Derivatives Instruments and Portfolio Valuation Classes
# dx_valuation.py
#
# DX Analytics is a financial analytics library, mainly for
# derivatives modeling and pricing by Monte Carlo simulation
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
from dx_models import *
import statsmodels.api as sm
# Classes for single risk factor instrument valuation
class valuation_class_single(object):
''' Basic class for single-risk factor instrument valuation.
Attributes
==========
name : string
name of the object
underlying :
instance of simulation class
mar_env : instance of market_environment
market environment data for valuation
payoff_func : string
derivatives payoff in Python syntax
Example: 'np.maximum(maturity_value - 100, 0)'
where maturity_value is the NumPy vector with
respective values of the underlying
Example: 'np.maximum(instrument_values - 100, 0)'
where instrument_values is the NumPy matrix with
values of the underlying over the whole time/path grid
Methods
=======
update:
updates selected valuation parameters
delta :
returns the delta of the derivative
vega :
returns the vega of the derivative
'''
def __init__(self, name, underlying, mar_env, payoff_func=''):
try:
self.name = name
self.pricing_date = mar_env.pricing_date
try:
self.strike = mar_env.get_constant('strike')
# strike is optional
except:
pass
self.maturity = mar_env.get_constant('maturity')
self.currency = mar_env.get_constant('currency')
# simulation parameters and discount curve from simulation object
self.frequency = underlying.frequency
self.paths = underlying.paths
self.discount_curve = underlying.discount_curve
self.payoff_func = payoff_func
self.underlying = underlying
# provide pricing_date and maturity to underlying
self.underlying.special_dates.extend([self.pricing_date,
self.maturity])
except:
print "Error parsing market environment."
def update(self, initial_value=None, volatility=None,
strike=None, maturity=None):
if initial_value is not None:
self.underlying.update(initial_value=initial_value)
if volatility is not None:
self.underlying.update(volatility=volatility)
if strike is not None:
self.strike = strike
if maturity is not None:
self.maturity = maturity
# add new maturity date if not in time_grid
if not maturity in self.underlying.time_grid:
self.underlying.special_dates.append(maturity)
self.underlying.instrument_values = None
def delta(self, interval=None, accuracy=4):
if interval is None:
interval = self.underlying.initial_value / 50.
# forward-difference approximation
# calculate left value for numerical delta
value_left = self.present_value(fixed_seed=True)
# numerical underlying value for right value
initial_del = self.underlying.initial_value + interval
self.underlying.update(initial_value=initial_del)
# calculate right value for numerical delta
value_right = self.present_value(fixed_seed=True)
# reset the initial_value of the simulation object
self.underlying.update(initial_value=initial_del - interval)
delta = (value_right - value_left) / interval
# correct for potential numerical errors
if delta < -1.0:
return -1.0
elif delta > 1.0:
return 1.0
else:
return round(delta, accuracy)
def vega(self, interval=0.01, accuracy=4):
if interval < self.underlying.volatility / 50.:
interval = self.underlying.volatility / 50.
# forward-difference approximation
# calculate the left value for numerical vega
value_left = self.present_value(fixed_seed=True)
# numerical volatility value for right value
vola_del = self.underlying.volatility + interval
# update the simulation object
self.underlying.update(volatility=vola_del)
# calculate the right value of numerical vega
value_right = self.present_value(fixed_seed=True)
# reset volatility value of simulation object
self.underlying.update(volatility=vola_del - interval)
vega = (value_right - value_left) / interval
return round(vega, accuracy)
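# Note (illustration only): the delta and vega methods above use
# forward-difference approximations, roughly
#     delta ~ (V(S0 + h) - V(S0)) / h,         h = S0 / 50
#     vega  ~ (V(sigma + h) - V(sigma)) / h,   h = max(0.01, sigma / 50)
# where V denotes the Monte Carlo present value for the given parameter set.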
class valuation_mcs_european_single(valuation_class_single):
''' Class to value European options with arbitrary payoff
by single-factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (Monte Carlo estimator)
'''
def generate_payoff(self, fixed_seed=False):
'''
Attributes
==========
fixed_seed : boolean
            use the same/fixed seed for the valuation
'''
try:
# strike defined?
strike = self.strike
except:
pass
paths = self.underlying.get_instrument_values(fixed_seed=fixed_seed)
time_grid = self.underlying.time_grid
try:
time_index = np.where(time_grid == self.maturity)[0]
time_index = int(time_index)
except:
print "Maturity date not in time grid of underlying."
maturity_value = paths[time_index]
# average value over whole path
mean_value = np.mean(paths[:time_index], axis=1)
# maximum value over whole path
max_value = np.amax(paths[:time_index], axis=1)[-1]
# minimum value over whole path
min_value = np.amin(paths[:time_index], axis=1)[-1]
try:
payoff = eval(self.payoff_func)
return payoff
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=6, fixed_seed=False, full=False):
'''
Attributes
==========
accuracy : int
number of decimals in returned result
        fixed_seed : boolean
            use the same/fixed seed for the valuation
'''
cash_flow = self.generate_payoff(fixed_seed=fixed_seed)
discount_factor = self.discount_curve.get_discount_factors(
self.underlying.time_grid, self.paths)[1][0]
result = np.sum(discount_factor * cash_flow) / len(cash_flow)
if full:
return round(result, accuracy), discount_factor * cash_flow
else:
return round(result, accuracy)
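# Illustration only (hypothetical strike levels): payoff_func strings for this
# class are evaluated against the names defined in generate_payoff, e.g.
#     'np.maximum(maturity_value - 100, 0)'    # European call struck at 100
#     'np.maximum(105 - maturity_value, 0)'    # European put struck at 105
# where maturity_value holds the simulated values of the underlying at the
# maturity date, one entry per path.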
class valuation_mcs_american_single(valuation_class_single):
''' Class to value American options with arbitrary payoff
by single-factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (LSM Monte Carlo estimator)
according to Longstaff-Schwartz (2001)
'''
def generate_payoff(self, fixed_seed=False):
'''
Attributes
==========
        fixed_seed : boolean
            use the same/fixed seed for the valuation
'''
try:
strike = self.strike
except:
pass
paths = self.underlying.get_instrument_values(fixed_seed=fixed_seed)
time_grid = self.underlying.time_grid
try:
time_index_start = int(np.where(time_grid == self.pricing_date)[0])
time_index_end = int(np.where(time_grid == self.maturity)[0])
except:
print "Maturity date not in time grid of underlying."
instrument_values = paths[time_index_start:time_index_end + 1]
try:
payoff = eval(self.payoff_func)
return instrument_values, payoff, time_index_start, time_index_end
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=3, fixed_seed=False, bf=5, full=False):
'''
Attributes
==========
accuracy : int
number of decimals in returned result
        fixed_seed : boolean
            use the same/fixed seed for the valuation
bf : int
number of basis functions for regression
'''
instrument_values, inner_values, time_index_start, time_index_end = \
self.generate_payoff(fixed_seed=fixed_seed)
time_list = \
self.underlying.time_grid[time_index_start:time_index_end + 1]
discount_factors = self.discount_curve.get_discount_factors(
time_list, self.paths, dtobjects=True)[1]
V = inner_values[-1]
for t in range(len(time_list) - 2, 0, -1):
# derive relevant discount factor for given time interval
df = discount_factors[t] / discount_factors[t + 1]
# regression step
rg = np.polyfit(instrument_values[t], V * df, bf)
# calculation of continuation values per path
C = np.polyval(rg, instrument_values[t])
# optimal decision step:
# if condition is satisfied (inner value > regressed cont. value)
# then take inner value; take actual cont. value otherwise
V = np.where(inner_values[t] > C, inner_values[t], V * df)
df = discount_factors[0] / discount_factors[1]
result = np.sum(df * V) / len(V)
if full:
return round(result, accuracy), df * V
else:
return round(result, accuracy)
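# A minimal, self-contained sketch (hypothetical parameters, not used by any
# class in this module) of the Longstaff-Schwartz regression logic that
# valuation_mcs_american_single.present_value implements above, here for an
# American put under geometric Brownian motion.
def _lsm_put_illustration(S0=36., K=40., r=0.06, sigma=0.2, T=1.0,
                          M=50, I=25000, bf=5, seed=1000):
    ''' Returns the LSM Monte Carlo estimate of an American put value. '''
    import numpy as np
    np.random.seed(seed)
    dt = T / M
    df = np.exp(-r * dt)  # one-period discount factor
    # simulate I paths of the underlying over M time steps
    increments = ((r - 0.5 * sigma ** 2) * dt +
                  sigma * np.sqrt(dt) * np.random.standard_normal((M, I)))
    S = S0 * np.exp(np.cumsum(increments, axis=0))
    S = np.vstack((S0 * np.ones(I), S))
    h = np.maximum(K - S, 0)  # inner (exercise) values
    V = h[-1]  # value vector at maturity
    for t in range(M - 1, 0, -1):
        # regression step: continuation values from current underlying values
        rg = np.polyfit(S[t], V * df, bf)
        C = np.polyval(rg, S[t])
        # exercise where the inner value exceeds the regressed continuation value
        V = np.where(h[t] > C, h[t], V * df)
    return df * np.sum(V) / I  # LSM estimator of the present value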
# Classes for multi risk factor instrument valuation
class valuation_class_multi(object):
''' Basic class for multi-risk factor instrument valuation.
Attributes
==========
name : string
name of the object
mar_env : instance of market_environment
market environment data for valuation
underlyings : dictionary
instances of model classes
correlations : list
correlations between underlyings
payoff_func : string
derivatives payoff in Python syntax
Example: 'np.maximum(maturity_value[key] - 100, 0)'
where maturity_value[key] is the NumPy vector with
respective values of the underlying 'key' from the
risk_factors dictionary
Methods
=======
update:
updates selected valuation parameters
delta :
returns the delta of the derivative
vega :
returns the vega of the derivative
'''
def __init__(self, name, val_env, risk_factors=None, correlations=None,
payoff_func='', fixed_seed=False, portfolio=False):
try:
self.name = name
self.val_env = val_env
self.currency = self.val_env.get_constant('currency')
self.pricing_date = val_env.pricing_date
try:
self.strike = self.val_env.get_constant('strike')
# strike optional
except:
pass
self.maturity = self.val_env.get_constant('maturity')
self.frequency = self.val_env.get_constant('frequency')
self.paths = self.val_env.get_constant('paths')
self.discount_curve = self.val_env.get_curve('discount_curve')
self.risk_factors = risk_factors
self.underlyings = set()
if portfolio is False:
self.underlying_objects = {}
else:
self.underlying_objects = risk_factors
self.correlations = correlations
self.payoff_func = payoff_func
self.fixed_seed = fixed_seed
self.instrument_values = {}
try:
self.time_grid = self.val_env.get_curve('time_grid')
except:
self.time_grid = None
self.correlation_matrix = None
except:
print "Error parsing market environment."
# Generating general time grid
if self.time_grid is None:
start = self.val_env.get_constant('starting_date')
end = self.val_env.get_constant('final_date')
maturity = self.maturity
time_grid = pd.date_range(start=start, end=end,
freq=self.val_env.get_constant('frequency')
).to_pydatetime()
if start in time_grid and end in time_grid and \
maturity in time_grid:
self.time_grid = time_grid
else:
time_grid = list(time_grid)
if maturity not in time_grid:
time_grid.insert(0, maturity)
if start not in time_grid:
time_grid.insert(0, start)
if end not in time_grid:
time_grid.append(end)
time_grid.sort()
self.time_grid = np.array(time_grid)
self.val_env.add_curve('time_grid', self.time_grid)
if portfolio is False:
if correlations is not None:
ul_list = sorted(self.risk_factors)
correlation_matrix = np.zeros((len(ul_list), len(ul_list)))
np.fill_diagonal(correlation_matrix, 1.0)
correlation_matrix = pd.DataFrame(correlation_matrix,
index=ul_list, columns=ul_list)
for corr in correlations:
if corr[2] >= 1.0:
corr[2] = 0.999999999999
correlation_matrix[corr[0]].ix[corr[1]] = corr[2]
correlation_matrix[corr[1]].ix[corr[0]] = corr[2]
self.correlation_matrix = correlation_matrix
cholesky_matrix = np.linalg.cholesky(
np.array(correlation_matrix))
# dictionary with index positions
rn_set = {}
for asset in self.risk_factors:
rn_set[asset] = ul_list.index(asset)
# random numbers array
random_numbers = sn_random_numbers((len(rn_set),
len(self.time_grid),
self.val_env.constants['paths']),
fixed_seed=self.fixed_seed)
# adding all to valuation environment
self.val_env.add_list('cholesky_matrix', cholesky_matrix)
self.val_env.add_list('rn_set', rn_set)
self.val_env.add_list('random_numbers', random_numbers)
for asset in self.risk_factors:
mar_env = self.risk_factors[asset]
mar_env.add_environment(val_env)
model = models[mar_env.constants['model']]
if correlations is not None:
self.underlying_objects[asset] = model(asset,
mar_env, True)
else:
self.underlying_objects[asset] = model(asset,
mar_env, False)
def get_instrument_values(self, fixed_seed=True):
for obj in self.underlying_objects.values():
if obj.instrument_values is None:
obj.generate_paths(fixed_seed=fixed_seed)
def update(self, key=None, initial_value=None, volatility=None,
strike=None, maturity=None):
if key is not None:
underlying = self.underlying_objects[key]
if initial_value is not None:
underlying.update(initial_value=initial_value)
if volatility is not None:
underlying.update(volatility=volatility)
if strike is not None:
self.strike = strike
if maturity is not None:
self.maturity = maturity
            for underlying in self.underlying_objects.values():
underlying.update(final_date=self.maturity)
self.get_instrument_values()
def delta(self, key, interval=None):
if len(self.instrument_values) == 0:
self.get_instrument_values()
asset = self.underlying_objects[key]
if interval is None:
interval = asset.initial_value / 50.
value_left = self.present_value()
start_value = asset.initial_value
initial_del = start_value + interval
asset.update(initial_value=initial_del)
self.get_instrument_values()
value_right = self.present_value()
asset.update(start_value)
self.instrument_values = {}
delta = (value_right - value_left) / interval
if delta < -1.0:
return -1.0
elif delta > 1.0:
return 1.0
else:
return delta
def vega(self, key, interval=0.01):
if len(self.instrument_values) == 0:
self.get_instrument_values()
asset = self.underlying_objects[key]
if interval < asset.volatility / 50.:
interval = asset.volatility / 50.
value_left = self.present_value()
start_vola = asset.volatility
vola_del = start_vola + interval
asset.update(volatility=vola_del)
self.get_instrument_values()
value_right = self.present_value()
asset.update(volatility=start_vola)
self.instrument_values = {}
return (value_right - value_left) / interval
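# Illustration only (hypothetical correlation value): the Cholesky matrix built
# in valuation_class_multi above is what turns independent standard normal
# draws into correlated ones, e.g. with plain NumPy:
#     corr = np.array([[1.0, 0.6],
#                      [0.6, 1.0]])
#     L = np.linalg.cholesky(corr)
#     z = np.random.standard_normal((2, 100000))  # independent draws
#     z_corr = np.dot(L, z)  # np.corrcoef(z_corr) is approximately corr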
class valuation_mcs_european_multi(valuation_class_multi):
''' Class to value European options with arbitrary payoff
by multi-risk factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (Monte Carlo estimator)
'''
def generate_payoff(self, fixed_seed=True):
self.get_instrument_values(fixed_seed=True)
paths = {key: name.instrument_values for key, name
in self.underlying_objects.items()}
time_grid = self.time_grid
try:
time_index = np.where(time_grid == self.maturity)[0]
time_index = int(time_index)
except:
print "Maturity date not in time grid of underlying."
maturity_value = {}
mean_value = {}
max_value = {}
min_value = {}
for key in paths:
maturity_value[key] = paths[key][time_index]
mean_value[key] = np.mean(paths[key][:time_index], axis=1)
max_value[key] = np.amax(paths[key][:time_index], axis=1)
min_value[key] = np.amin(paths[key][:time_index], axis=1)
try:
payoff = eval(self.payoff_func)
return payoff
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=3, fixed_seed=True, full=False):
cash_flow = self.generate_payoff(fixed_seed)
discount_factor = self.discount_curve.get_discount_factors(
self.time_grid, self.paths)[1][0]
result = np.sum(discount_factor * cash_flow) / len(cash_flow)
if full:
            return round(result, accuracy), discount_factor * cash_flow
else:
return round(result, accuracy)
class valuation_mcs_american_multi(valuation_class_multi):
''' Class to value American options with arbitrary payoff
by multi-risk factor Monte Carlo simulation.
Methods
=======
generate_payoff :
returns payoffs given the paths and the payoff function
present_value :
returns present value (Monte Carlo estimator)
'''
def generate_payoff(self, fixed_seed=True):
self.get_instrument_values(fixed_seed=True)
self.instrument_values = {key: name.instrument_values for key, name
in self.underlying_objects.items()}
try:
time_index_start = int(np.where(self.time_grid == self.pricing_date)[0])
time_index_end = int(np.where(self.time_grid == self.maturity)[0])
except:
print "Maturity date not in time grid of underlying."
instrument_values = {}
for key, obj in self.instrument_values.items():
instrument_values[key] = \
self.instrument_values[key][time_index_start:time_index_end
+ 1]
try:
payoff = eval(self.payoff_func)
return instrument_values, payoff, time_index_start, time_index_end
except:
print "Error evaluating payoff function."
def present_value(self, accuracy=3, fixed_seed=True, full=False):
instrument_values, inner_values, time_index_start, time_index_end = \
self.generate_payoff(fixed_seed=fixed_seed)
time_list = self.time_grid[time_index_start:time_index_end + 1]
discount_factors = self.discount_curve.get_discount_factors(
time_list, self.paths, dtobjects=True)[1]
V = inner_values[-1]
for t in range(len(time_list) - 2, 0, -1):
df = discount_factors[t] / discount_factors[t + 1]
matrix = {}
for asset_1 in instrument_values.keys():
matrix[asset_1] = instrument_values[asset_1][t]
for asset_2 in instrument_values.keys():
matrix[asset_1 + asset_2] = instrument_values[asset_1][t] \
* instrument_values[asset_2][t]
rg = sm.OLS(V * df, np.array(matrix.values()).T).fit()
C = np.sum(rg.params * np.array(matrix.values()).T, axis=1)
V = np.where(inner_values[t] > C, inner_values[t], V * df)
df = discount_factors[0] / discount_factors[1]
result = np.sum(df * V) / len(V)
if full:
return round(result, accuracy), df * V
else:
return round(result, accuracy)
# Classes for derivatives portfolio valuation
class derivatives_position(object):
''' Class to model a derivatives position.
Attributes
==========
name : string
name of the object
quantity : float
number of derivatives instruments making up the position
underlyings : list of strings
        names of the risk factors the derivative depends on
mar_env : instance of market_environment
constants, lists and curves relevant for valuation_class
otype : string
valuation class to use
payoff_func : string
payoff string for the derivative
Methods
=======
get_info :
prints information about the derivative position
'''
def __init__(self, name, quantity, underlyings, mar_env, otype, payoff_func):
self.name = name
self.quantity = quantity
self.underlyings = underlyings
self.mar_env = mar_env
self.otype = otype
self.payoff_func = payoff_func
def get_info(self):
print "NAME"
print self.name, '\n'
print "QUANTITY"
print self.quantity, '\n'
print "UNDERLYINGS"
print self.underlyings, '\n'
print "MARKET ENVIRONMENT"
print "\n**Constants**"
for key in self.mar_env.constants:
print key, self.mar_env.constants[key]
print "\n**Lists**"
for key in self.mar_env.lists:
print key, self.mar_env.lists[key]
print "\n**Curves**"
for key in self.mar_env.curves:
print key, self.mar_env.curves[key]
print "\nOPTION TYPE"
print self.otype, '\n'
print "PAYOFF FUNCTION"
print self.payoff_func
models = {'gbm' : geometric_brownian_motion,
'jd' : jump_diffusion,
'sv' : stochastic_volatility,
'svjd' : stoch_vol_jump_diffusion,
'srd' : square_root_diffusion,
'srjd' : square_root_jump_diffusion,
'srjd+' : square_root_jump_diffusion_plus}
otypes = {'European single' : valuation_mcs_european_single,
'American single' : valuation_mcs_american_single,
'European multi' : valuation_mcs_european_multi,
'American multi' : valuation_mcs_american_multi}
class derivatives_portfolio(object):
''' Class for building and valuing portfolios of derivatives positions.
Attributes
==========
name : str
name of the object
positions : dict
dictionary of positions (instances of derivatives_position class)
val_env : market_environment
market environment for the valuation
risk_factors : dict
dictionary of market environments for the risk_factors
correlations : list or pd.DataFrame
correlations between risk_factors
fixed_seed : boolean
flag for fixed rng seed
Methods
=======
get_positions :
prints information about the single portfolio positions
get_values :
estimates and returns positions values
get_present_values :
returns the full distribution of the simulated portfolio values
get_statistics :
returns a pandas DataFrame object with portfolio statistics
get_port_risk :
estimates sensitivities for point-wise parameter shocks
'''
def __init__(self, name, positions, val_env, risk_factors,
correlations=None, fixed_seed=False, parallel=False):
self.name = name
self.positions = positions
self.val_env = val_env
self.risk_factors = risk_factors
self.underlyings = set()
if correlations is None or correlations is False:
self.correlations = None
else:
self.correlations = correlations
self.time_grid = None
self.underlying_objects = {}
self.valuation_objects = {}
self.fixed_seed = fixed_seed
self.parallel = parallel
self.special_dates = []
for pos in self.positions:
# determine earliest starting_date
self.val_env.constants['starting_date'] = \
min(self.val_env.constants['starting_date'],
positions[pos].mar_env.pricing_date)
# determine latest date of relevance
self.val_env.constants['final_date'] = \
max(self.val_env.constants['final_date'],
positions[pos].mar_env.constants['maturity'])
# collect all underlyings
# add to set; avoids redundancy
for ul in positions[pos].underlyings:
self.underlyings.add(ul)
# generating general time grid
start = self.val_env.constants['starting_date']
end = self.val_env.constants['final_date']
time_grid = pd.date_range(start=start, end=end,
freq=self.val_env.constants['frequency']
).to_pydatetime()
time_grid = list(time_grid)
for pos in self.positions:
maturity_date = positions[pos].mar_env.constants['maturity']
if maturity_date not in time_grid:
time_grid.insert(0, maturity_date)
self.special_dates.append(maturity_date)
if start not in time_grid:
time_grid.insert(0, start)
if end not in time_grid:
time_grid.append(end)
# delete duplicate entries
# & sort dates in time_grid
time_grid = sorted(set(time_grid))
self.time_grid = np.array(time_grid)
self.val_env.add_list('time_grid', self.time_grid)
# taking care of correlations
ul_list = sorted(self.underlyings)
correlation_matrix = np.zeros((len(ul_list), len(ul_list)))
np.fill_diagonal(correlation_matrix, 1.0)
correlation_matrix = pd.DataFrame(correlation_matrix,
index=ul_list, columns=ul_list)
if self.correlations is not None:
if isinstance(self.correlations, list):
# if correlations are given as list of list/tuple objects
for corr in self.correlations:
if corr[2] >= 1.0:
corr[2] = 0.999999999999
if corr[2] <= -1.0:
corr[2] = -0.999999999999
# fill correlation matrix
correlation_matrix[corr[0]].ix[corr[1]] = corr[2]
correlation_matrix[corr[1]].ix[corr[0]] = corr[2]
# determine Cholesky matrix
cholesky_matrix = np.linalg.cholesky(np.array(
correlation_matrix))
else:
# if correlation matrix was already given as pd.DataFrame
cholesky_matrix = np.linalg.cholesky(np.array(
self.correlations))
else:
cholesky_matrix = np.linalg.cholesky(np.array(
correlation_matrix))
# dictionary with index positions for the
# slice of the random number array to be used by
# respective underlying
rn_set = {}
for asset in self.underlyings:
rn_set[asset] = ul_list.index(asset)
# random numbers array, to be used by
# all underlyings (if correlations exist)
random_numbers = sn_random_numbers(
(len(rn_set),
len(self.time_grid),
self.val_env.constants['paths']),
fixed_seed=self.fixed_seed)
# adding all to valuation environment which is
# to be shared with every underlying
self.val_env.add_list('correlation_matrix', correlation_matrix)
self.val_env.add_list('cholesky_matrix', cholesky_matrix)
self.val_env.add_list('random_numbers', random_numbers)
self.val_env.add_list('rn_set', rn_set)
for asset in self.underlyings:
# select market environment of asset
mar_env = self.risk_factors[asset]
# add valuation environment to market environment
mar_env.add_environment(val_env)
# select the right simulation class
model = models[mar_env.constants['model']]
# instantiate simulation object
if self.correlations is not None:
corr = True
else:
corr = False
self.underlying_objects[asset] = model(asset, mar_env,
corr=corr)
for pos in positions:
# select right valuation class (European, American)
val_class = otypes[positions[pos].otype]
# pick the market environment and add the valuation environment
mar_env = positions[pos].mar_env
mar_env.add_environment(self.val_env)
# instantiate valuation class single risk vs. multi risk
if self.positions[pos].otype[-5:] == 'multi':
underlying_objects = {}
for obj in positions[pos].underlyings:
underlying_objects[obj] = self.underlying_objects[obj]
self.valuation_objects[pos] = \
val_class(name=positions[pos].name,
val_env=mar_env,
risk_factors=underlying_objects,
payoff_func=positions[pos].payoff_func,
portfolio=True)
else:
self.valuation_objects[pos] = \
val_class(name=positions[pos].name,
mar_env=mar_env,
underlying=self.underlying_objects[
positions[pos].underlyings[0]],
payoff_func=positions[pos].payoff_func)
def get_positions(self):
''' Convenience method to get information about
all derivatives positions in a portfolio. '''
for pos in self.positions:
bar = '\n' + 50 * '-'
print bar
self.positions[pos].get_info()
print bar
def get_values(self, fixed_seed=False):
''' Providing portfolio position values. '''
res_list = []
if self.parallel is True:
self.underlying_objects = \
simulate_parallel(self.underlying_objects.values())
results = value_parallel(self.valuation_objects.values())
# iterate over all positions in portfolio
for pos in self.valuation_objects:
pos_list = []
if self.parallel is True:
present_value = results[self.valuation_objects[pos].name]
else:
present_value = self.valuation_objects[pos].present_value()
pos_list.append(pos)
pos_list.append(self.positions[pos].name)
pos_list.append(self.positions[pos].quantity)
pos_list.append(self.positions[pos].otype)
pos_list.append(self.positions[pos].underlyings)
# calculate all present values for the single instruments
pos_list.append(present_value)
pos_list.append(self.valuation_objects[pos].currency)
# single instrument value times quantity
pos_list.append(present_value * self.positions[pos].quantity)
res_list.append(pos_list)
res_df = pd.DataFrame(res_list, columns=['position', 'name', 'quantity',
'otype', 'risk_facts', 'value',
'currency', 'pos_value'])
print 'Total\n', res_df[['pos_value']].sum()
return res_df
def get_present_values(self, fixed_seed=False):
''' Get full distribution of present values. '''
present_values = np.zeros(self.val_env.get_constant('paths'))
if self.parallel is True:
self.underlying_objects = \
simulate_parallel(self.underlying_objects.values())
results = value_parallel(self.valuation_objects.values(),
full=True)
for pos in self.valuation_objects:
present_values += results[self.valuation_objects[pos].name] \
* self.positions[pos].quantity
else:
for pos in self.valuation_objects:
present_values += self.valuation_objects[pos].present_value(
fixed_seed = fixed_seed, full=True)[1] \
* self.positions[pos].quantity
return present_values
def get_statistics(self, fixed_seed=None):
''' Providing position statistics. '''
res_list = []
if fixed_seed is None:
fixed_seed = self.fixed_seed
if self.parallel is True:
self.underlying_objects = \
simulate_parallel(self.underlying_objects.values())
results = value_parallel(self.valuation_objects.values(),
fixed_seed=fixed_seed)
delta_list = greeks_parallel(self.valuation_objects.values(),
Greek='Delta')
vega_list = greeks_parallel(self.valuation_objects.values(),
Greek='Vega')
# iterate over all positions in portfolio
for pos in self.valuation_objects:
pos_list = []
if self.parallel is True:
present_value = results[self.valuation_objects[pos].name]
else:
present_value = self.valuation_objects[pos].present_value(
fixed_seed=fixed_seed, accuracy=3)
pos_list.append(pos)
pos_list.append(self.positions[pos].name)
pos_list.append(self.positions[pos].quantity)
pos_list.append(self.positions[pos].otype)
pos_list.append(self.positions[pos].underlyings)
# calculate all present values for the single instruments
pos_list.append(present_value)
pos_list.append(self.valuation_objects[pos].currency)
# single instrument value times quantity
pos_list.append(present_value * self.positions[pos].quantity)
if self.positions[pos].otype[-5:] == 'multi':
# multiple delta and vega values for multi-risk derivatives
delta_dict = {}
vega_dict = {}
for key in self.valuation_objects[pos].underlying_objects.keys():
# delta and vega per position and underlying
delta_dict[key] = round(self.valuation_objects[pos].delta(key)
* self.positions[pos].quantity, 6)
vega_dict[key] = round(self.valuation_objects[pos].vega(key)
* self.positions[pos].quantity, 6)
pos_list.append(str(delta_dict))
pos_list.append(str(vega_dict))
else:
if self.parallel is True:
# delta from parallel calculation
pos_list.append(delta_list[pos]
* self.positions[pos].quantity)
# vega from parallel calculation
pos_list.append(vega_list[pos]
* self.positions[pos].quantity)
else:
# delta per position
pos_list.append(self.valuation_objects[pos].delta()
* self.positions[pos].quantity)
# vega per position
pos_list.append(self.valuation_objects[pos].vega()
* self.positions[pos].quantity)
res_list.append(pos_list)
res_df = pd.DataFrame(res_list, columns=['position', 'name',
'quantity', 'otype',
'risk_facts', 'value',
'currency', 'pos_value',
'pos_delta', 'pos_vega'])
print 'Totals\n', res_df[['pos_value', 'pos_delta', 'pos_vega']].sum()
return res_df
def get_port_risk(self, Greek='Delta', low=0.8, high=1.2, step=0.1,
fixed_seed=None, risk_factors=None):
''' Calculating portfolio risk statistics. '''
if risk_factors is None:
risk_factors = self.underlying_objects.keys()
if fixed_seed is None:
fixed_seed = self.fixed_seed
sensitivities = {}
levels = np.arange(low, high + 0.01, step)
if self.parallel is True:
values = value_parallel(self.valuation_objects.values(),
fixed_seed=fixed_seed)
for key in self.valuation_objects:
values[key] *= self.positions[key].quantity
else:
values = {}
for key, obj in self.valuation_objects.items():
values[key] = obj.present_value() \
* self.positions[key].quantity
import copy
for rf in risk_factors:
print '\n' + rf
in_val = self.underlying_objects[rf].initial_value
in_vol = self.underlying_objects[rf].volatility
results = []
for level in levels:
values_sens = copy.deepcopy(values)
print level,
                if np.isclose(level, 1.0):  # robust to float drift in the np.arange levels
pass
else:
for key, obj in self.valuation_objects.items():
if rf in self.positions[key].underlyings:
if self.positions[key].otype[-5:] == 'multi':
if Greek == 'Delta':
obj.underlying_objects[rf].update(
initial_value=level * in_val)
if Greek == 'Vega':
obj.underlying_objects[rf].update(
volatility=level * in_vol)
else:
if Greek == 'Delta':
obj.underlying.update(
initial_value=level * in_val)
elif Greek == 'Vega':
obj.underlying.update(
volatility=level * in_vol)
values_sens[key] = obj.present_value(
fixed_seed=fixed_seed) \
* self.positions[key].quantity
if self.positions[key].otype[-5:] == 'multi':
obj.underlying_objects[rf].update(
initial_value=in_val)
obj.underlying_objects[rf].update(
volatility=in_vol)
else:
obj.underlying.update(initial_value=in_val)
obj.underlying.update(volatility=in_vol)
if Greek == 'Delta':
results.append((round(level * in_val, 2),
sum(values_sens.values())))
if Greek == 'Vega':
results.append((round(level * in_vol, 2),
sum(values_sens.values())))
sensitivities[rf + '_' + Greek] = pd.DataFrame(np.array(results),
index=levels,
columns=['factor', 'value'])
print 2 * '\n'
return pd.Panel(sensitivities), sum(values.values())
def risk_report(sensitivities, digits=2):
for key in sensitivities:
print '\n' + key
print np.round(sensitivities[key].transpose(), digits)
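# Hedged usage sketch (names are assumptions taken from this module): once a
# derivatives_portfolio instance `port` has been built, a risk report could be
# produced roughly like this:
#
#     sensitivities, benchmark = port.get_port_risk(Greek='Delta',
#                                                   low=0.8, high=1.2, step=0.1)
#     risk_report(sensitivities)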
import multiprocessing as mp
def simulate_parallel(objs, fixed_seed=True):
procs = []
man = mp.Manager()
output = man.Queue()
def worker(o, output):
o.generate_paths(fixed_seed=fixed_seed)
output.put((o.name, o))
for o in objs:
procs.append(mp.Process(target=worker, args=(o, output)))
[pr.start() for pr in procs]
[pr.join() for pr in procs]
results = [output.get() for o in objs]
underlying_objects = {}
for o in results:
underlying_objects[o[0]] = o[1]
return underlying_objects
def value_parallel(objs, fixed_seed=True, full=False):
procs = []
man = mp.Manager()
output = man.Queue()
def worker(o, output):
if full is True:
pvs = o.present_value(fixed_seed=fixed_seed, full=True)[1]
output.put((o.name, pvs))
else:
pv = o.present_value(fixed_seed=fixed_seed)
output.put((o.name, pv))
for o in objs:
procs.append(mp.Process(target=worker, args=(o, output)))
[pr.start() for pr in procs]
[pr.join() for pr in procs]
res_list = [output.get() for o in objs]
results = {}
for o in res_list:
results[o[0]] = o[1]
return results
def greeks_parallel(objs, Greek='Delta'):
procs = []
man = mp.Manager()
output = man.Queue()
def worker(o, output):
if Greek == 'Delta':
output.put((o.name, o.delta()))
elif Greek == 'Vega':
output.put((o.name, o.vega()))
for o in objs:
procs.append(mp.Process(target=worker, args=(o, output)))
[pr.start() for pr in procs]
[pr.join() for pr in procs]
res_list = [output.get() for o in objs]
results = {}
for o in res_list:
results[o[0]] = o[1]
return results
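# The three helpers above (simulate_parallel, value_parallel, greeks_parallel)
# share one pattern: one mp.Process per object, with results collected through a
# mp.Manager().Queue(). A hedged calling sketch (the `portfolio` object is an
# assumption, not defined here):
#
#     underlyings = simulate_parallel(portfolio.underlying_objects.values())
#     values = value_parallel(portfolio.valuation_objects.values(), fixed_seed=True)
#     deltas = greeks_parallel(portfolio.valuation_objects.values(), Greek='Delta')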
class var_derivatives_portfolio(derivatives_portfolio):
''' Class for building and valuing portfolios of derivatives positions
    with risk factors given by a fitted VAR model.
Attributes
==========
name : str
name of the object
positions : dict
dictionary of positions (instances of derivatives_position class)
val_env : market_environment
market environment for the valuation
var_risk_factors : VAR model
vector autoregressive model for risk factors
fixed_seed : boolean
flag for fixed rng seed
Methods
=======
get_positions :
prints information about the single portfolio positions
get_values :
estimates and returns positions values
get_present_values :
returns the full distribution of the simulated portfolio values
'''
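    # Hedged construction sketch (illustrative only; `positions`, `val_env` and the
    # statsmodels VAR instance `var_model` are assumptions, not defined here):
    #
    #     port = var_derivatives_portfolio('var_port', positions, val_env, var_model)
    #     pvs = port.get_present_values()   # full distribution of simulated values
    #
    # get_statistics and get_port_risk are deliberately not implemented for this
    # VAR-based variant (see the overrides at the end of the class).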
def __init__(self, name, positions, val_env, var_risk_factors,
fixed_seed=False, parallel=False):
self.name = name
self.positions = positions
self.val_env = val_env
self.var_risk_factors = var_risk_factors
self.underlyings = set()
self.time_grid = None
self.underlying_objects = {}
self.valuation_objects = {}
self.fixed_seed = fixed_seed
self.special_dates = []
for pos in self.positions:
# determine earliest starting_date
self.val_env.constants['starting_date'] = \
min(self.val_env.constants['starting_date'],
positions[pos].mar_env.pricing_date)
# determine latest date of relevance
self.val_env.constants['final_date'] = \
max(self.val_env.constants['final_date'],
positions[pos].mar_env.constants['maturity'])
# collect all underlyings
# add to set; avoids redundancy
for ul in positions[pos].underlyings:
self.underlyings.add(ul)
# generating general time grid
start = self.val_env.constants['starting_date']
end = self.val_env.constants['final_date']
time_grid = pd.date_range(start=start, end=end,
freq='B' # allow business day only
).to_pydatetime()
time_grid = list(time_grid)
if start not in time_grid:
time_grid.insert(0, start)
if end not in time_grid:
time_grid.append(end)
# delete duplicate entries & sort dates in time_grid
time_grid = sorted(set(time_grid))
self.time_grid = np.array(time_grid)
self.val_env.add_list('time_grid', self.time_grid)
#
# generate simulated paths
#
self.fit_model = var_risk_factors.fit(maxlags=5, ic='bic')
sim_paths = self.fit_model.simulate(
paths=self.val_env.get_constant('paths'),
steps=len(self.time_grid),
initial_values=var_risk_factors.y[-1])
symbols = sim_paths[0].columns.values
for sym in symbols:
df = pd.DataFrame()
for i, path in enumerate(sim_paths):
df[i] = path[sym]
self.underlying_objects[sym] = general_underlying(
sym, df, self.val_env)
for pos in positions:
# select right valuation class (European, American)
val_class = otypes[positions[pos].otype]
# pick the market environment and add the valuation environment
mar_env = positions[pos].mar_env
mar_env.add_environment(self.val_env)
# instantiate valuation classes
self.valuation_objects[pos] = \
val_class(name=positions[pos].name,
mar_env=mar_env,
underlying=self.underlying_objects[
positions[pos].underlyings[0]],
payoff_func=positions[pos].payoff_func)
def get_statistics(self):
raise NotImplementedError
def get_port_risk(self):
raise NotImplementedError
| agpl-3.0 |
robintw/scikit-image | doc/source/conf.py | 9 | 11356 | # -*- coding: utf-8 -*-
#
# skimage documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 22 13:00:30 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import skimage
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, '..', 'ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.pngmath',
'numpydoc',
'sphinx.ext.autosummary',
'plot2rst',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
]
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skimage'
copyright = '2013, the scikit-image team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
setup_lines = open('../../skimage/__init__.py').readlines()
version = 'vUndefined'
for l in setup_lines:
if l.startswith('__version__'):
version = l.split("'")[1]
break
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-image'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'skimage v%s docs' % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['navigation.html',
'localtoc.html',
'versions.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikitimagedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'scikit-image.tex', u'The scikit-image Documentation',
u'scikit-image development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{enumitem}
\setlistdepth{100}
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_basedir = os.path.join(curpath, "plots")
plot_pre_code = """
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
import matplotlib
matplotlib.rcParams.update({
'font.size': 14,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
"""
plot_include_source = True
plot_formats = [('png', 100), ('pdf', 100)]
plot2rst_index_name = 'README'
plot2rst_rcparams = {'image.cmap' : 'gray',
'image.interpolation' : 'none'}
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
_python_doc_base = 'http://docs.python.org/2.7'
intersphinx_mapping = {
'python': (_python_doc_base, None),
'numpy': ('http://docs.scipy.org/doc/numpy',
(None, './_intersphinx/numpy-objects.inv')),
'scipy': ('http://docs.scipy.org/doc/scipy/reference',
(None, './_intersphinx/scipy-objects.inv')),
'sklearn': ('http://scikit-learn.org/stable',
(None, './_intersphinx/sklearn-objects.inv')),
}
# ----------------------------------------------------------------------------
# Source code links
# ----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
# Function courtesy of NumPy to return URLs containing line numbers
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(skimage.__file__))
if 'dev' in skimage.__version__:
return ("http://github.com/scikit-image/scikit-image/blob/"
"master/skimage/%s%s" % (fn, linespec))
else:
return ("http://github.com/scikit-image/scikit-image/blob/"
"v%s/skimage/%s%s" % (skimage.__version__, fn, linespec))
| bsd-3-clause |
judithfan/pix2svg | generative/tests/compare_test/concat_first/train.py | 1 | 10750 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.metrics import mean_squared_error
from model import AttendedSpatialCollapseCONV42 # best CONV42 model
from model import AttendedSpatialCollapsePOOL1 # best POOL1 model
from model import PredictorFC6 # best FC6 model
from dataset import VisualDataset
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
if checkpoint['layer'] == 'fc6':
model = PredictorFC6()
elif checkpoint['layer'] == 'conv42':
model = AttendedSpatialCollapseCONV42()
elif checkpoint['layer'] == 'pool1':
model = AttendedSpatialCollapsePOOL1()
else:
raise Exception('Unrecognized layer: %s' % checkpoint['layer'])
model.load_state_dict(checkpoint['state_dict'])
model.layer = checkpoint['layer']
return model
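# Hedged usage sketch: reloading a model saved by save_checkpoint above. The path
# below is an assumption based on the defaults used later in this script.
#
#     model = load_checkpoint('./trained_models/model_best.pth.tar', use_cuda=False)
#     model.eval()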
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
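# Hedged usage sketch for AverageMeter (the numbers are made up):
#
#     meter = AverageMeter()
#     meter.update(0.75, n=10)   # e.g. batch accuracy 0.75 over 10 samples
#     meter.update(0.85, n=10)
#     print(meter.avg)           # count-weighted running average -> 0.80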
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('layer', type=str, help='fc6|conv42|pool1')
parser.add_argument('--loss-scale', type=float, default=10000., help='multiplier for loss [default: 10000.]')
parser.add_argument('--train-test-split-dir', type=str, default='./train_test_split/1',
help='where to load train_test_split paths [default: ./train_test_split/1]')
parser.add_argument('--out-dir', type=str, default='./trained_models',
help='where to save checkpoints [./trained_models]')
parser.add_argument('--batch-size', type=int, default=10,
help='number of examples in a mini-batch [default: 10]')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate [default: 1e-4]')
parser.add_argument('--epochs', type=int, default=100, help='number of epochs [default: 100]')
parser.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if not os.path.isdir(args.out_dir):
os.makedirs(args.out_dir)
train_dataset = VisualDataset(layer=args.layer, split='train',
train_test_split_dir=args.train_test_split_dir)
val_dataset = VisualDataset(layer=args.layer, split='val',
train_test_split_dir=args.train_test_split_dir)
test_dataset = VisualDataset(layer=args.layer, split='test',
train_test_split_dir=args.train_test_split_dir)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
if args.layer == 'fc6':
model = PredictorFC6()
elif args.layer == 'conv42':
model = AttendedSpatialCollapseCONV42()
elif args.layer == 'pool1':
model = AttendedSpatialCollapsePOOL1()
else:
raise Exception('Unrecognized layer: %s' % args.layer)
if args.cuda:
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
def train(epoch):
model.train()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
for batch_idx, (sketch, label) in enumerate(train_loader):
sketch = Variable(sketch)
label = Variable(label)
batch_size = len(sketch)
if args.cuda:
sketch = sketch.cuda()
label = label.cuda()
# set optimizer defaults to 0
optimizer.zero_grad()
pred_logits = []
photo_generator = train_dataset.gen_photos()
for photo in photo_generator():
photo = Variable(photo)
if args.cuda:
photo = photo.cuda()
photo = (photo.repeat(batch_size, 1) if args.layer == 'fc6' else
photo.repeat(batch_size, 1, 1, 1))
pred_logit = model(photo, sketch)
pred_logits.append(pred_logit)
pred_logits = torch.cat(pred_logits, dim=1)
loss = args.loss_scale * F.cross_entropy(pred_logits, label)
loss_meter.update(loss.data[0], batch_size)
pred = pred_logits.data.max(1, keepdim=True)[1]
correct = pred.eq(label.data.view_as(pred)).long().cpu().sum()
accuracy = correct / float(batch_size)
acc_meter.update(accuracy, batch_size)
loss.backward()
optimizer.step()
mean_grads = torch.mean(torch.cat([param.grad.cpu().data.contiguous().view(-1)
for param in model.parameters()]))
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:6f}\t|Grad|: {:6f}'.format(
epoch, batch_idx * batch_size, len(train_loader.dataset), 100. * batch_idx / len(train_loader),
loss_meter.avg, acc_meter.avg, mean_grads))
print('====> Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.6f}'.format(
epoch, loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
def validate():
model.eval()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
pbar = tqdm(total=len(val_loader))
for batch_idx, (sketch, label) in enumerate(val_loader):
sketch = Variable(sketch, volatile=True)
label = Variable(label, requires_grad=False)
batch_size = len(sketch)
if args.cuda:
sketch = sketch.cuda()
label = label.cuda()
pred_logits = []
photo_generator = val_dataset.gen_photos()
for photo in photo_generator():
photo = Variable(photo)
if args.cuda:
photo = photo.cuda()
photo = (photo.repeat(batch_size, 1) if args.layer == 'fc6' else
photo.repeat(batch_size, 1, 1, 1))
pred_logit = model(photo, sketch)
pred_logits.append(pred_logit)
pred_logits = torch.cat(pred_logits, dim=1)
loss = args.loss_scale * F.cross_entropy(pred_logits, label)
loss_meter.update(loss.data[0], batch_size)
pred = pred_logits.data.max(1, keepdim=True)[1]
correct = pred.eq(label.data.view_as(pred)).long().cpu().sum()
accuracy = correct / float(batch_size)
acc_meter.update(accuracy, batch_size)
pbar.update()
pbar.close()
print('====> Val Loss: {:.4f}\tVal Accuracy: {:.6f}'.format(
loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
def test():
model.eval()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
pbar = tqdm(total=len(test_loader))
for batch_idx, (sketch, label) in enumerate(test_loader):
sketch = Variable(sketch, volatile=True)
label = Variable(label, requires_grad=False)
batch_size = len(sketch)
if args.cuda:
sketch = sketch.cuda()
label = label.cuda()
pred_logits = []
photo_generator = test_dataset.gen_photos()
for photo in photo_generator():
photo = Variable(photo)
if args.cuda:
photo = photo.cuda()
photo = (photo.repeat(batch_size, 1) if args.layer == 'fc6' else
photo.repeat(batch_size, 1, 1, 1))
pred_logit = model(photo, sketch)
pred_logits.append(pred_logit)
pred_logits = torch.cat(pred_logits, dim=1)
loss = args.loss_scale * F.cross_entropy(pred_logits, label)
loss_meter.update(loss.data[0], batch_size)
pred = pred_logits.data.max(1, keepdim=True)[1]
correct = pred.eq(label.data.view_as(pred)).long().cpu().sum()
accuracy = correct / float(batch_size)
acc_meter.update(accuracy, batch_size)
pbar.update()
pbar.close()
print('====> Test Loss: {:.4f}\tTest Accuracy: {:.6f}'.format(
loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
best_loss = sys.maxint
store_loss = np.zeros((args.epochs, 3))
store_acc = np.zeros((args.epochs, 3))
for epoch in xrange(1, args.epochs + 1):
train_loss, train_acc = train(epoch)
val_loss, val_acc = validate()
test_loss, test_acc = test()
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
save_checkpoint({
'state_dict': model.state_dict(),
'train_loss': train_loss,
'train_acc': train_acc,
'val_loss': val_loss,
'val_acc': val_acc,
'test_loss': test_loss,
'test_acc': test_acc,
'layer': args.layer,
'optimizer' : optimizer.state_dict(),
}, is_best, folder=args.out_dir)
store_loss[epoch - 1, 0] = train_loss
store_loss[epoch - 1, 1] = val_loss
store_loss[epoch - 1, 2] = test_loss
store_acc[epoch - 1, 0] = train_acc
store_acc[epoch - 1, 1] = val_acc
store_acc[epoch - 1, 2] = test_acc
np.save(os.path.join(args.out_dir, 'summary-loss.npy'), store_loss)
np.save(os.path.join(args.out_dir, 'summary-acc.npy'), store_acc)
| mit |
anurag313/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source]  # list of nodes to check at the next level
while next_level:
this_level = next_level # advance to next level
        next_level = set()  # and start a new set (the fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
fbagirov/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
michaelStettler/HISI | HISI/mnist_batch.py | 1 | 1497 | import struct
import numpy as np
import matplotlib.pyplot as plt
import datetime
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
create_batch = True
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# np.save("Stimuli/mnist_train", mnist.train.images)
# np.save("Stimuli/mnist_train_label", mnist.train.labels)
# np.save("Stimuli/mnist_test", mnist.test.images)
# np.save("Stimuli/mnist_test_label", mnist.test.labels)
#
# print("Number images in train: ", np.shape(mnist.train.images)[0])
# print("Number images in test: ", np.shape(mnist.test.images)[0])
num_pass_image = 20 # CNN1
if create_batch:
batch = []
#create batch for every 1000
for i in range(1,5):
num_images = i*1000
temp_batch = []
for j in range(num_pass_image):
batch1 = np.arange(num_images)
np.random.shuffle(batch1)
temp_batch.append(batch1)
temp_batch = np.reshape(temp_batch,(num_images*num_pass_image,))
batch.append(temp_batch)
#create batch for every 5000
for i in range(1,12):
num_images = i*5000
temp_batch = []
for j in range(num_pass_image):
batch1 = np.arange(num_images)
np.random.shuffle(batch1)
temp_batch.append(batch1)
temp_batch = np.reshape(temp_batch,(num_images*num_pass_image,))
batch.append(temp_batch)
np.save("Stimuli/batch", batch)
print("batch size", np.size(batch)) | mit |
ai-se/Transfer-Learning | src/utils/plot_util.py | 1 | 3918 | from __future__ import division
import os
import re
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pdb import set_trace
from collections import Counter
from scipy.stats import ttest_ind
# Update path
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
def plot_compare(dframe, save_path=os.path.join(root, "results"), y_lbl="", title="", postfix=None):
# Clear all
plt.clf()
# We define a fake subplot that is in fact only the plot.
plot = plt.figure(figsize=(3, 4)).add_subplot(111)
# We change the fontsize of minor ticks label
plot.tick_params(axis='both', which='major', labelsize=12)
# Plot Data
plt.plot(dframe["Overlap"], dframe["XTREEv1"],
color='#a50f15', linewidth=1)
plt.plot(dframe["Overlap"], dframe["Alves"],
color='#2c7fb8', linewidth=1)
plt.plot(dframe["Overlap"], dframe["Oliveira"],
color='#636363', linewidth=1)
plt.plot(dframe["Overlap"], dframe["Shatnawi"],
color='#78c679', linewidth=1)
# Set title, axes labels
plt.title(title, size=12)
plt.ylabel(y_lbl, size=12)
plt.xlabel("Overlap", size=12)
plt.legend(loc="best")
    suffix = "_" + postfix if postfix else ""
    fname = os.path.join(save_path,
                         re.sub(" ", "_", title).lower() + suffix + ".png")
plt.savefig(fname, dpi=300, facecolor='w', edgecolor='w', figsize=(3, 4),
orientation='portrait', papertype=None, format=None,
transparent=True, bbox_inches="tight", pad_inches=0.1,
frameon=None)
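# Hedged usage sketch for plot_compare; the DataFrame below is a made-up example.
# The only requirement implied by the code above is that it carries the columns
# "Overlap", "XTREEv1", "Alves", "Oliveira" and "Shatnawi".
#
#     dframe = pd.DataFrame({"Overlap": [25, 50, 75, 100],
#                            "XTREEv1": [0.40, 0.45, 0.50, 0.55],
#                            "Alves": [0.30, 0.35, 0.40, 0.45],
#                            "Oliveira": [0.20, 0.25, 0.30, 0.35],
#                            "Shatnawi": [0.10, 0.15, 0.20, 0.25]})
#     plot_compare(dframe, y_lbl="Recall", title="Example run", postfix="demo")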
def plot_bar(dframe_inc, dframe_dec, save_path=os.path.join(root, "results"), y_lbl="", title="", postfix=""):
# Clear all
plt.clf()
# We define a fake subplot that is in fact only the plot.
# plot = plt.figure(figsize=(3, 4)).add_subplot(111)
# We change the fontsize of minor ticks label
plt.tick_params(axis='both', which='major', labelsize=20)
bar_width = 0.3
group_sep = 0.1
opacity = 0.7
index = np.arange(len(dframe_dec['XTREE']))
plt.bar(index, dframe_dec["XTREE"], bar_width,
color='#9D1C29', label='XTREE (Decreased)')
plt.bar(index + bar_width, dframe_inc["XTREE"], bar_width,
color='#D72638', alpha=opacity,
label='XTREE (Increased)')
# plt.bar(index + 2 * bar_width + group_sep, dframe_dec["Alves"], bar_width,
# color='#37002F', label='Alves (Decreased)')
# plt.bar(index + 3 * bar_width + group_sep, dframe_inc["XTREE"], bar_width,
# color='#53174B', alpha=opacity,
# label='Alves (Increased)')
# plt.bar(index + 4 * bar_width + 2 * group_sep, dframe_dec["Shatnawi"], bar_width,
# color='#238443', label='Shatw (Decreased)')
# plt.bar(index + 5 * bar_width + 2 * group_sep, dframe_inc["Shatnawi"], bar_width,
# color='#238443', alpha=opacity,
# label='Shatw (Increased)')
# plt.bar(index + 6 * bar_width + 3 * group_sep, dframe_dec["Oliveira"], bar_width,
# color='#E8500A', label='Olive (Decreased)')
# plt.bar(index + 7 * bar_width + 3 * group_sep, dframe_inc["Oliveira"], bar_width,
# color='#FF7536', alpha=opacity,
# label='Oliveria (Increased)')
# Set title, axes labels
plt.title(title, size=20)
plt.ylabel(y_lbl, size=20)
plt.xlabel("Overlap", size=20)
plt.xticks(index + bar_width * 4, ('25', '50', '75', '100', ''))
plt.legend(loc="best")
# Filename
fname = os.path.join(save_path, re.sub(" ", "_", title).lower() + ".png")
# plt.show()
plt.savefig(fname, dpi=300, facecolor='w', edgecolor='w', figsize=(3, 4),
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1,
frameon=None)
| unlicense |
jereze/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
SebastianoF/LabelsManager | nilabels/tools/detections/get_segmentation.py | 1 | 5487 | import numpy as np
import matplotlib.mlab as mlab
from matplotlib import pyplot as plt
from sklearn.mixture import GaussianMixture
from scipy.signal import medfilt
try:
from skimage import filters
except ImportError:
from skimage import filter as filters
from nilabels.tools.image_colors_manipulations.relabeller import relabeller
def intensity_segmentation(in_array, num_levels=5):
"""
Simplest way of getting an intensity based segmentation.
:param in_array: image data in a numpy array.
    :param num_levels: number of intensity levels; maximum allowed is 65535 - 1.
    :return: segmentation of in_array into num_levels levels, based on its intensities.
"""
segm = np.zeros_like(in_array, dtype=np.uint16)
min_data = np.min(in_array)
max_data = np.max(in_array)
h = (max_data - min_data) / float(int(num_levels))
for k in range(0, num_levels):
places = (min_data + k * h <= in_array) * (in_array < min_data + (k + 1) * h)
np.place(segm, places, k)
places = in_array == max_data
np.place(segm, places, num_levels-1)
return segm
def otsu_threshold(in_array, side='above', return_as_mask=True):
"""
Segmentation of an array with Otsu thresholding parameters from skimage filters.
:param in_array: input array representing an rgb image.
:param side: must be 'above' or 'below', representing the side of the image thresholded after Otsu response.
:param return_as_mask: the output can be a boolean mask if True.
:return: thresholded input image according to Otsu and input parameters.
"""
otsu_thr = filters.threshold_otsu(in_array)
if side == 'above':
new_data = in_array * (in_array >= otsu_thr)
elif side == 'below':
new_data = in_array * (in_array < otsu_thr)
else:
raise IOError("Parameter side must be 'above' or 'below'.")
if return_as_mask:
new_data = new_data.astype(np.bool)
return new_data
def MoG_array(in_array, K=None, mask_array=None, pre_process_median_filter=False,
output_gmm_class=False, pre_process_only_interquartile=False,
see_histogram=None, reorder_mus=True):
"""
Mixture of gaussians for medical images. A simple wrap of
sklearn.mixture.GaussianMixture to get a mog-based segmentation of an input
nibabel image.
:param in_array: input array format to be segmented with a MOG method.
:param K: number of classes, if None, it is estimated with a BIC criterion (may take a while)
:param mask_array: nibabel mask if you want to consider only a subset of the masked data.
:param pre_process_median_filter: apply a median filter before pre-processing (reduce salt and pepper noise).
:param pre_process_only_interquartile: set to zero above and below interquartile (below mask if any) in the data.
:param output_gmm_class: return only the gmm sklearn class instance.
:param see_histogram: can be True, False (or None) or a string (with a path where to save the plotted histogram).
:param reorder_mus: only if output_gmm_class=False, reorder labels from smallest to bigger means.
:return: [c, p] crisp and probabilistic segmentation OR gmm, instance of the class sklearn.mixture.GaussianMixture.
"""
if pre_process_median_filter:
print('Pre-process with a median filter.')
data = medfilt(in_array)
else:
data = in_array
data = np.copy(data.flatten().reshape(-1, 1))
if mask_array is not None:
mask_data = np.copy(mask_array.flatten().astype(np.bool_).reshape(-1, 1))
data = mask_data * data
if pre_process_only_interquartile:
print('Get only interquartile data.')
non_zero_data = data[np.where(np.nan_to_num(data) > 1e-6)]
low_p = np.percentile(non_zero_data, 25)
high_p = np.percentile(non_zero_data, 75)
data = (data > low_p) * (data < high_p) * data
if K is None:
print('Estimating numbers of components with BIC criterion... may take some minutes')
n_components = range(3, 15)
models = [GaussianMixture(n_components=k, random_state=0).fit(data) for k in n_components]
        K = n_components[int(np.argmin([m.bic(data) for m in models]))]  # component count with the lowest BIC, not the BIC value
print('Estimated number of classes according to BIC: {}'.format(K))
gmm = GaussianMixture(n_components=K).fit(data)
if output_gmm_class:
return gmm
else:
crisp = gmm.predict(data).reshape(in_array.shape)
prob = gmm.predict_proba(data).reshape(list(in_array.shape) + [K])
if reorder_mus:
mu = gmm.means_.reshape(-1)
p = list(np.argsort(mu))
old_labels = list(range(K))
new_labels = [p.index(l) for l in old_labels] # the inverse of p
crisp = np.copy(relabeller(crisp, old_labels, new_labels))
prob = np.stack([prob[..., t] for t in new_labels], axis=3)
if see_histogram is not None and see_histogram is not False:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect(1)
ax.hist(crisp.flatten(), bins=50, normed=True)
lx = ax.get_xlim()
x = np.arange(lx[0], lx[1], (lx[1] - lx[0]) / 1000.)
for m, s in zip(gmm.means_, gmm.precisions_.reshape(-1)):
ax.plot(x, mlab.normpdf(x, m, s))
if isinstance(see_histogram, str):
plt.savefig(see_histogram)
else:
plt.show()
return crisp, prob
| mit |
higheredbob/python-scripts-conf-desktop-icon-files | python_sqlite_forensic_parsers/searchsq.py | 1 | 3113 | #!/usr/bin/env python
"""Use pandas to load, then write data to csv. Any sqlite db's should be parseable.
"""
import sys
import argparse
import sqlite3
import os.path
import os
import csv
import pandas as pd
from pandas import DataFrame, read_csv
# import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="parse a dir cont. sqlite files")
parser.add_argument("-v", "--verbose", action="store_true",
help="increase the output of information")
parser.add_argument("-d", "--directPath", nargs="?", dest='directPath',
default=sys.stdin, action="store",
help="input a dir to recursively walk through")
args = parser.parse_args()
mode = 'rb'
topdir = args.directPath
exten = ""
def validateSQL(testingDB):
#open file, confirm it is an SQLite DB
    try:
        if testingDB != "":
            f = open(testingDB, mode)
        else:
            raise IOError('no file path given')
    except IOError:
        print "file is not present or could not be opened as SQLite"
        raise argparse.ArgumentTypeError('file is not present or not SQLite')
f.seek(0)
verifyHeader = f.read(16)
if "SQLite" not in verifyHeader:
print ("file is not a SQL or been apeRaped")
raise argparse.ArgumentTypeError('file was opened and read, no SQLite in header')
else:
offset = 0
stats = os.stat(testingDB)
filesize = stats.st_size
f.seek(0)
f.close()
validatedDB = testingDB
return validatedDB
def openOut(outPath, outCSV):
    # open the CSV output file for writing inside the requested output directory
    try:
        output = open(os.path.join(outPath, outCSV), 'w')
    except IOError:
        print "Error opening output file"
        sys.exit(0)
    return output
def CSVwriter(db):
    # run the fixed query against an already-connected sqlite3 database handle
    selection = "SELECT fileID, relativePath, domain FROM Files"
    return db.execute(selection).fetchall()
def step(topdir):
    inPath = []
    nameDB = []
    for dirpath, dirnames, filenames in os.walk(topdir):
        for files in filenames:
            fullpath = os.path.join(dirpath, files)
            if os.path.isfile(fullpath):
                inPath.append(dirpath)
                nameDB.append(files)
    return nameDB, inPath
def pandasql():
    nameDB, inPath = step(topdir)
    mastPath = []
    for dirsf, filesf in zip(inPath, nameDB):
        mastPath.append(os.path.join(dirsf, filesf))
    fileSQL = []
    for files in mastPath:
        if files != "":
            try:
                validatedDB = validateSQL(files)
            except argparse.ArgumentTypeError:
                continue
            if validatedDB and os.path.isfile(validatedDB):
                fileSQL.append(validatedDB)
    for dbs in fileSQL:
        db = sqlite3.connect(dbs)
        query = db.execute("SELECT name FROM sqlite_master WHERE type='table';")
        cols = [column[0] for column in query.description]
        results = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)
        print results
def main():
    pandasql()
if __name__=="__main__":
main()
| gpl-3.0 |
jcrudy/sklearntools | sklearntools/test/test_sklearntools.py | 1 | 17863 | '''
Created on Feb 23, 2016
@author: jason
'''
import numpy as np
from sklearntools.sklearntools import StagedEstimator, MaskedEstimator,\
ColumnSubsetTransformer, NonNullSubsetFitter, safe_assign_column
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.logistic import LogisticRegression
from sklearntools.calibration import CalibratedEstimatorCV, ResponseTransformingEstimator,\
LogTransformer, PredictorTransformer, HazardToRiskEstimator,\
MovingAverageSmoothingEstimator, ThresholdClassifier, ProbaPredictingEstimator
from sklearntools.feature_selection import SingleEliminationFeatureImportanceEstimatorCV,\
BackwardEliminationEstimator, UnivariateFeatureImportanceEstimatorCV,\
BestKFeatureSelector
from numpy.testing.utils import assert_raises
from sklearntools.glm import GLM
import statsmodels.api as sm
import warnings
import pandas
from sklearntools.model_selection import ModelSelector
from sklearntools.scoring import log_loss_metric
from sklearn.ensemble.forest import RandomForestRegressor
from numpy.ma.testutils import assert_array_almost_equal
from sklearntools.earth import Earth
from sklearntools.kfold import CrossValidatingEstimator
from sklearn.metrics.regression import r2_score
from sklearn.model_selection import KFold
from nose.tools import assert_list_equal
warnings.simplefilter("error")
def test_safe_assign_column():
data = pandas.DataFrame({'A': [1,2,3], 'B': [4,5,6]})
safe_assign_column(data, 'A', [7,8,9])
assert_list_equal(list(sorted(data.columns)), ['A', 'B'])
def test_single_elimination_feature_importance_estimator_cv():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_sequence = np.ravel(np.argsort(beta ** 2, axis=0))
model1 = SingleEliminationFeatureImportanceEstimatorCV(LinearRegression())
model1.fit(X, y)
fitted_sequence = np.ravel(np.argsort(model1.feature_importances_, axis=0))
np.testing.assert_array_equal(fitted_sequence, target_sequence)
def test_univariate_feature_importance_estimator_cv():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_sequence = np.ravel(np.argsort(beta ** 2, axis=0))
model1 = UnivariateFeatureImportanceEstimatorCV(LinearRegression())
model1.fit(X, y)
fitted_sequence = np.ravel(np.argsort(model1.feature_importances_, axis=0))
np.testing.assert_array_equal(fitted_sequence, target_sequence)
def test_k_best_feature_selector():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
# beta = np.random.normal(size=(n,1))
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_vars = np.ravel(np.argsort(beta ** 2, axis=0))[::-1][:3]
target_support = np.zeros(shape=n, dtype=bool)
target_support[target_vars] = True
model1 = BestKFeatureSelector(UnivariateFeatureImportanceEstimatorCV(LinearRegression()), k=3)
model1.fit(X, y)
np.testing.assert_array_equal(model1.support_, target_support)
def test_backward_elimination_estimation():
np.random.seed(0)
m = 100000
n = 6
factor = .9
X = np.random.normal(size=(m,n))
beta = 100 * np.ones(shape=n)
for i in range(1, n):
beta[i] = factor * beta[i-1]
beta = np.random.permutation(beta)[:,None]
# beta = np.random.normal(size=(n,1))
y = np.dot(X, beta) + 0.01 * np.random.normal(size=(m, 1))
target_sequence = np.ravel(np.argsort(beta ** 2, axis=0))
model1 = BackwardEliminationEstimator(SingleEliminationFeatureImportanceEstimatorCV(LinearRegression()))
model1.fit(X, y)
# model2 = BRFE(FeatureImportanceEstimatorCV(LinearRegression()))
# model2.fit(X, y)
np.testing.assert_array_equal(model1.elimination_sequence_, target_sequence)
def test_multiple_response_regressor():
np.random.seed(1)
m = 100000
n = 10
X = np.random.normal(size=(m,n))
beta1 = np.random.normal(size=(n,1))
beta2 = np.random.normal(size=(n,1))
y1 = np.dot(X, beta1)
p2 = 1. / (1. + np.exp( - np.dot(X, beta2)))
y2 = np.random.binomial(n=1, p=p2)
y = np.concatenate([y1, y2], axis=1)
model = MaskedEstimator(LinearRegression(), [True, False]) & MaskedEstimator(ProbaPredictingEstimator(LogisticRegression()), [False, True])
# MultipleResponseEstimator([('linear', np.array([True, False], dtype=bool), LinearRegression()),
# ('logistic', np.array([False, True], dtype=bool), ProbaPredictingEstimator(LogisticRegression()))])
model.fit(X, y)
assert np.mean(beta1 - model.estimators_[0].estimator_.coef_) < .01
assert np.mean(beta2 - model.estimators_[1].estimator_.estimator_.coef_) < .01
model.get_params()
model.predict(X)
def test_calibration():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y_lin = np.dot(X, beta)
y_clas = np.random.binomial( 1, 1. / (1. + np.exp(-y_lin)) )
y = np.concatenate([y_lin, y_clas], axis=1)
estimator = MaskedEstimator(LinearRegression(), np.array([True, False], dtype=bool))
calibrator = MaskedEstimator(LogisticRegression(), [False, True])
# estimator = linear_regressor & calibrator
# MultipleResponseEstimator([('estimator', np.array([True, False], dtype=bool), LinearRegression())])
# calibrator = MultipleResponseEstimator([('calibrator', np.array([False, True], dtype=bool), LogisticRegression())])
model = CalibratedEstimatorCV(estimator, calibrator, cv=KFold(n_splits=4, shuffle=True), n_jobs=1)
model.fit(X, y)
assert np.max(beta[:, 0] - model.estimator_.estimator_.coef_) < .000001
assert np.max(model.calibrator_.estimator_.coef_ - 1.) < .1
def test_predictor_transformer_calibration():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y_lin = np.dot(X, beta)
y_clas = np.random.binomial( 1, 1. / (1. + np.exp(-y_lin)) )
y = np.concatenate([y_lin, y_clas], axis=1)
estimator = MaskedEstimator(LinearRegression(), np.array([True, False], dtype=bool))
calibrator = MaskedEstimator(LogisticRegression(), [False, True])
# estimator = linear_regressor & calibrator
# MultipleResponseEstimator([('estimator', np.array([True, False], dtype=bool), LinearRegression())])
# calibrator = MultipleResponseEstimator([('calibrator', np.array([False, True], dtype=bool), LogisticRegression())])
model = PredictorTransformer(estimator) >> calibrator
model.fit(X, y)
assert np.max(beta[:, 0] - model.intermediate_stages_[0].estimator_.estimator_.coef_) < .000001
assert np.max(model.final_stage_.estimator_.coef_ - 1.) < .1
def test_pipeline():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
beta[np.random.binomial(p=2.0/float(n), n=1, size=n).astype(bool)] = 0
y = np.dot(X, beta) + 0.5 * np.random.normal(size=(m, 1))
beta_reduced = beta[beta != 0]
model = BackwardEliminationEstimator(SingleEliminationFeatureImportanceEstimatorCV(LinearRegression()))
model >>= LinearRegression()
model.fit(X, y)
assert np.max(np.abs(model.final_stage_.coef_ - beta_reduced)) < .1
def test_response_transforming_estimator():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
sigma = .1
y_pre = np.dot(X, beta) + sigma * np.random.normal(size=(m,1))
y_post = np.exp(y_pre)
model = ResponseTransformingEstimator(LinearRegression(), LogTransformer(offset=0.))
model.fit(X, y_post)
assert np.abs(np.mean(model.predict(X) - y_pre)) < .01
# Because LinearRegression has no transform method
assert_raises(AttributeError, lambda: model.transform(X))
def test_hazard_to_risk():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ProbaPredictingEstimator(ThresholdClassifier(HazardToRiskEstimator(LogisticRegression()))))
model.fit(X, rate, exposure=exposure)
y_pred = model.predict(X, exposure)
assert np.abs((np.sum(y_pred) - np.sum(rate > 0)) / np.sum(rate > 0)) < .1
assert np.max(np.abs(model.estimator_.coef_ - beta[:,0])) < .1
def test_hazard_to_risk_staged():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ProbaPredictingEstimator(ThresholdClassifier(HazardToRiskEstimator(LogisticRegression()))))
model.fit(X, rate, exposure=exposure)
y_pred = model.predict(X, exposure)
assert np.abs((np.sum(y_pred) - np.sum(rate > 0)) / np.sum(rate > 0)) < .1
assert np.max(np.abs(model.estimator_.coef_ - beta[:,0])) < .1
def test_moving_average_smoothing_estimator():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ThresholdClassifier(HazardToRiskEstimator(MovingAverageSmoothingEstimator(RandomForestRegressor()))))
model.fit(X, rate, exposure=exposure)
y_pred = model.predict(X, exposure)
assert np.abs((np.sum(y_pred) - np.sum(rate > 0)) / np.sum(rate > 0)) < .1
assert np.max(np.abs(model.estimator_.coef_ - beta[:,0])) < .1
def test_staged_estimator():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
beta[np.random.binomial(p=2.0/float(n), n=1, size=n).astype(bool)] = 0
y = np.dot(X, beta) + 0.5 * np.random.normal(size=(m, 1))
beta_reduced = beta[beta != 0]
stage0 = BackwardEliminationEstimator(SingleEliminationFeatureImportanceEstimatorCV(LinearRegression()))
stage1 = LinearRegression()
model = StagedEstimator([stage0, stage1])
model.fit(X, y)
assert np.max(np.abs(model.final_stage_.coef_ - beta_reduced)) < .1
#
# y_lin = np.dot(X, beta)
# y_clas = np.random.binomial( 1, 1. / (1. + np.exp(-y_lin)) )
# y = np.concatenate([y_lin, y_clas], axis=1)
# estimator = mask_estimator(LinearRegression(), np.array([True, False], dtype=bool))
# calibrator = mask_estimator(LogisticRegression(), [False, True])
# # estimator = linear_regressor & calibrator
# # MultipleResponseEstimator([('estimator', np.array([True, False], dtype=bool), LinearRegression())])
# # calibrator = MultipleResponseEstimator([('calibrator', np.array([False, True], dtype=bool), LogisticRegression())])
# model = CalibratedEstimatorCV(estimator, calibrator)
# model.fit(X, y)
# assert np.max(beta[:, 0] - model.estimator_.estimators_[0][2].coef_) < .000001
# assert np.max(model.calibrator_.estimators_[0][2].coef_ - 1.) < .1
def test_column_subset_transformer():
m = 1000
n = 10
X = np.random.normal(size=(m,n))
x_cols = [0,3,4,5]
y_cols = 9
sample_weight_cols = 8
exposure_cols = 7
subsetter1 = ColumnSubsetTransformer(x_cols=x_cols, y_cols=y_cols,
sample_weight_cols=sample_weight_cols,
exposure_cols=exposure_cols)
np.testing.assert_array_equal(subsetter1.transform(X), X[:, x_cols])
args = {'X': X}
subsetter1.update(args)
np.testing.assert_array_equal(args['X'], X[:, x_cols])
np.testing.assert_array_equal(args['y'], X[:, y_cols])
np.testing.assert_array_equal(args['sample_weight'], X[:, sample_weight_cols])
np.testing.assert_array_equal(args['exposure'], X[:, exposure_cols])
X_ = pandas.DataFrame(X, columns=['x%d' % n for n in range(10)])
x_cols_ = ['x%d' % n for n in x_cols]
y_cols_ = 'x%d' % y_cols
sample_weight_cols_ = 'x%d' % sample_weight_cols
exposure_cols_ = 'x%d' % exposure_cols
subsetter2 = ColumnSubsetTransformer(x_cols=x_cols_, y_cols=y_cols_,
sample_weight_cols=sample_weight_cols_,
exposure_cols=exposure_cols_)
np.testing.assert_array_equal(subsetter2.transform(X_), X[:, x_cols])
args_ = {'X': X_}
subsetter2.update(args_)
np.testing.assert_array_equal(args_['X'], X[:, x_cols])
np.testing.assert_array_equal(args_['y'], X[:, y_cols])
np.testing.assert_array_equal(args_['sample_weight'], X[:, sample_weight_cols])
np.testing.assert_array_equal(args_['exposure'], X[:, exposure_cols])
lin = ColumnSubsetTransformer(x_cols=x_cols_, y_cols=y_cols_) >> LinearRegression()
lin.fit(X_)
lin.predict(X_.loc[:, x_cols_])
lin.score(X_)
def test_model_selector():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
hazard = np.exp(np.dot(X, beta))
exposure = np.random.exponential(size=(m,1))
rate = np.random.poisson(hazard * exposure) / exposure
best_subset = np.ravel(np.argsort(np.abs(beta))[::-1][:3])
worst_subset = np.ravel(np.argsort(np.abs(beta))[:3])
basic_model = CalibratedEstimatorCV(GLM(sm.families.Gaussian(sm.families.links.log), add_constant=False),
ProbaPredictingEstimator(ThresholdClassifier(HazardToRiskEstimator(LogisticRegression()))))
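    # model1 keeps only the three columns with the largest |beta| and model2 the three smallest;
    # the selector is expected to keep model1.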
model1 = CrossValidatingEstimator(ColumnSubsetTransformer(x_cols=best_subset) >> basic_model, metric=log_loss_metric)
model2 = CrossValidatingEstimator(ColumnSubsetTransformer(x_cols=worst_subset) >> basic_model, metric=log_loss_metric)
model = ModelSelector([model1, model2])
model.fit(X, rate, exposure=exposure)
np.testing.assert_array_equal(model.best_estimator_.estimator_.intermediate_stages_[0].x_cols, best_subset)
def test_cross_validating_estimator():
np.random.seed(1)
m = 1000
n = 5
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y = np.ravel(np.dot(X, beta)) + np.random.normal(.5, size=m)
model = CrossValidatingEstimator(LinearRegression(), cv=KFold(n_splits=4, shuffle=True), n_jobs=2)
y_pred_cv = model.fit_predict(X, y)
y_pred = model.predict(X)
assert r2_score(np.ravel(y_pred_cv), np.ravel(y_pred)) > .98
def test_non_null_row_subset_fitter():
np.random.seed(1)
m = 10000
n = 10
# Simulate an event under constant hazard, with hazard = X * beta and
# iid exponentially distributed exposure times.
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=(n,1))
y = np.ravel(np.dot(X, beta))
missing = np.random.binomial(p=.001, n=1, size=(m,n)) == 1
X[missing] = None
model = NonNullSubsetFitter(LinearRegression())
model.fit(X, y)
assert np.max(np.abs(np.ravel(beta) - model.estimator_.coef_)) < .001
def test_linear_transformation():
np.random.seed(1)
m = 10000
n = 10
X = np.random.normal(size=(m,n))
beta1 = np.random.normal(size=(n,1))
y1 = np.ravel(np.dot(X, beta1))
beta2 = np.random.normal(size=(n,1))
y2 = np.ravel(np.dot(X, beta2))
model1 = (Earth() >> LinearRegression()).fit(X, y1)
model2 = Earth().fit(X, y2)
combination = 2*model1 - model2
assert_array_almost_equal(combination.predict(X), 2 * np.ravel(model1.predict(X)) - np.ravel(model2.predict(X)))
if __name__ == '__main__':
import sys
import nose
    # This code will run the tests in this file.
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
| bsd-3-clause |
ntnu-tdat2004/machine-learning | 4_logistic_regression_XOR_visualization.py | 1 | 7085 | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from mpl_toolkits.mplot3d import axes3d, art3d
import matplotlib.pyplot as plt
from matplotlib import cm
matplotlib.rcParams.update({'font.size': 11})
# regarding the notations, see http://stats.stackexchange.com/questions/193908/in-machine-learning-why-are-superscripts-used-instead-of-subscripts
def sigmoid(t):
return 1 / (1 + np.exp(-t))
W1_init=np.mat([[10.0, -10.0], [10.0, -10.0]])
b1_init=np.mat([[-5.0, 15.0]])
W2_init=np.mat([[10.0], [10.0]])
b2_init=np.mat([[-15.0]])
# also try (found through 4_logistic_regression_XOR_example):
# W1_init = np.mat([[7.43929911, 5.68582106], [7.44233704, 5.68641663]])
# b1_init = np.mat([[-3.40935969, -8.69532299]])
# W2_init = np.mat([[13.01280117], [-13.79168701]])
# b2_init = np.mat([[-6.1043458]])
class LogisticRegressionModel:
def __init__(self, W1=W1_init.copy(), W2=W2_init.copy(), b1=b1_init.copy(), b2=b2_init.copy()):
self.W1 = W1
self.W2 = W2
self.b1 = b1
self.b2 = b2
# first layer function
def f1(self, x): return sigmoid(x * self.W1 + self.b1)
# second layer function
def f2(self, h): return sigmoid(h * self.W2 + self.b2)
# predictor
def f(self, x): return self.f2(self.f1(x))
# uses Cross Entropy. The error function is also sometimes called cost or loss function
def error(self, x, y): return -np.sum(np.multiply(y, np.log(self.f(x))) + np.multiply((1 - y), np.log(1 - self.f(x))))
model = LogisticRegressionModel()
# observed/training input and output
x_train = np.mat([[0, 0], [0, 1], [1, 0], [1, 1]])
y_train = np.mat([[0], [1], [1], [0]])
fig = plt.figure("Logistic regression: the logical XOR operator")
plot1 = fig.add_subplot(121, projection='3d')
plot1.plot_wireframe(np.array([[]]), np.array([[]]), np.array([[]]), color="green", label="$h=$f1$(x)=\\sigma(x$W1$+$b1$)$")
plot1_h1 = plot1.plot_wireframe(np.array([[]]), np.array([[]]), np.array([[]]))
plot1_h2 = plot1.plot_wireframe(np.array([[]]), np.array([[]]), np.array([[]]))
plot1.plot(x_train[:,0].A.squeeze(), x_train[:,1].A.squeeze(), y_train[:,0].A.squeeze(), 'o', label="$(\\hat x_1^{(i)}, \\hat x_2^{(i)},\\hat y^{(i)})$", color="blue")
plot1_info = fig.text(0.01, 0.02, "")
plot1.set_xlabel("$x_1$")
plot1.set_ylabel("$x_2$")
plot1.set_zlabel("$h_1,h_2$")
plot1.legend(loc="upper left")
plot1.set_xticks([0, 1])
plot1.set_yticks([0, 1])
plot1.set_zticks([0, 1])
plot1.set_xlim(-0.25, 1.25)
plot1.set_ylim(-0.25, 1.25)
plot1.set_zlim(-0.25, 1.25)
plot2 = fig.add_subplot(222, projection='3d')
plot2_f2 = plot2.plot_wireframe(np.array([[]]), np.array([[]]), np.array([[]]), color="green", label="$y=$f2$(h)=\\sigma(h $W2$+$b2$)$")
plot2_info = fig.text(0.8, 0.9, "")
plot2.set_xlabel("$h_1$")
plot2.set_ylabel("$h_2$")
plot2.set_zlabel("$y$")
plot2.legend(loc="upper left")
plot2.set_xticks([0, 1])
plot2.set_yticks([0, 1])
plot2.set_zticks([0, 1])
plot2.set_xlim(-0.25, 1.25)
plot2.set_ylim(-0.25, 1.25)
plot2.set_zlim(-0.25, 1.25)
plot3 = fig.add_subplot(224, projection='3d')
plot3_f = plot3.plot_wireframe(np.array([[]]), np.array([[]]), np.array([[]]), color="green", label="$y=f(x)=$f2$($f1$(x))$")
plot3_info = fig.text(0.3, 0.01, "")
plot3.set_xlabel("$x_1$")
plot3.set_ylabel("$x_2$")
plot3.set_zlabel("$y$")
plot3.legend(loc="upper left")
plot3.set_xticks([0, 1])
plot3.set_yticks([0, 1])
plot3.set_zticks([0, 1])
plot3.set_xlim(-0.25, 1.25)
plot3.set_ylim(-0.25, 1.25)
plot3.set_zlim(-0.25, 1.25)
table = plt.table(cellText=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]], colWidths = [0.1]*3, colLabels=["$x_1$", "$x_2$", "$f(x)$"], cellLoc="center", loc="lower right")
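# Interactive controls: an upper-case key increases a parameter by 0.2 and the matching lower-case key decreases it
# (W/E/R/T -> entries of W1, Y/U -> entries of W2, B/N -> entries of b1, M -> b2); 'c' resets all parameters to their initial values.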
def update_figure(event=None):
if(event is not None):
if event.key == "W":
model.W1[0,0]+=0.2
elif event.key == "w":
model.W1[0,0]-=0.2
elif event.key == "E":
model.W1[0,1]+=0.2
elif event.key == "e":
model.W1[0,1]-=0.2
elif event.key == "R":
model.W1[1,0]+=0.2
elif event.key == "r":
model.W1[1,0]-=0.2
elif event.key == "T":
model.W1[1,1]+=0.2
elif event.key == "t":
model.W1[1,1]-=0.2
elif event.key == "Y":
model.W2[0,0]+=0.2
elif event.key == "y":
model.W2[0,0]-=0.2
elif event.key == "U":
model.W2[1,0]+=0.2
elif event.key == "u":
model.W2[1,0]-=0.2
elif event.key == "B":
model.b1[0,0]+=0.2
elif event.key == "b":
model.b1[0,0]-=0.2
elif event.key == "N":
model.b1[0,1]+=0.2
elif event.key == "n":
model.b1[0,1]-=0.2
elif event.key == "M":
model.b2[0,0]+=0.2
elif event.key == "m":
model.b2[0,0]-=0.2
elif event.key == "c":
model.W1=W1_init.copy()
model.W2=W2_init.copy()
model.b1=b1_init.copy()
model.b2=b2_init.copy()
global plot1_h1, plot1_h2, plot2_f2, plot3_f
plot1_h1.remove()
plot1_h2.remove()
plot2_f2.remove()
plot3_f.remove()
x1_grid, x2_grid = np.meshgrid(np.linspace(-0.25, 1.25, 10), np.linspace(-0.25, 1.25, 10))
h1_grid=np.empty([10, 10])
h2_grid=np.empty([10, 10])
f2_grid=np.empty([10, 10])
f_grid=np.empty([10, 10])
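    # Evaluate the first-layer outputs over the (x1, x2) grid, the second layer over the same grid
    # interpreted as (h1, h2), and the full model over (x1, x2) for the wireframe plots.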
for i in range(0, x1_grid.shape[0]):
for j in range(0, x1_grid.shape[1]):
h=model.f1([[x1_grid[i,j], x2_grid[i,j]]])
h1_grid[i,j]=h[0,0]
h2_grid[i,j]=h[0,1]
f2_grid[i,j]=model.f2([[x1_grid[i,j], x2_grid[i,j]]])
f_grid[i,j]=model.f([[x1_grid[i,j], x2_grid[i,j]]])
plot1_h1 = plot1.plot_wireframe(x1_grid, x2_grid, h1_grid, color="lightgreen")
plot1_h2 = plot1.plot_wireframe(x1_grid, x2_grid, h2_grid, color="darkgreen")
plot1_info.set_text("W1$=\\left[\\stackrel{%.2f}{%.2f}\\/\\stackrel{%.2f}{%.2f}\\right]$\nb1$=[{%.2f}, {%.2f}]$" % (model.W1[0,0], model.W1[1,0], model.W1[0,1], model.W1[1,1], model.b1[0,0], model.b1[0,1]))
plot2_f2 = plot2.plot_wireframe(x1_grid, x2_grid, f2_grid, color="green")
plot2_info.set_text("W2$=\\left[\\stackrel{%.2f}{%.2f}\\right]$\nb2$=[{%.2f}]$" % (model.W2[0,0], model.W2[1,0], model.b2[0,0]))
plot3_f = plot3.plot_wireframe(x1_grid, x2_grid, f_grid, color="green")
plot3_info.set_text("$error = -\\sum_i\\left [ \\hat y^{(i)} \\log\\/f(\\hat x^{(i)}) + (1-\\hat y^{(i)}) \\log (1-f(\\hat x^{(i)})) \\right ] = %.2f$" % model.error(x_train, y_train))
table._cells[(1, 2)]._text.set_text("${%.1f}$" % model.f([[0, 0]]))
table._cells[(2, 2)]._text.set_text("${%.1f}$" % model.f([[0, 1]]))
table._cells[(3, 2)]._text.set_text("${%.1f}$" % model.f([[1, 0]]))
table._cells[(4, 2)]._text.set_text("${%.1f}$" % model.f([[1, 1]]))
fig.canvas.draw()
update_figure()
fig.canvas.mpl_connect('key_press_event', update_figure)
plt.show()
| mit |
xzh86/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
    p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
tedmeeds/tcga_encoder | tcga_encoder/models/svd/main.py | 1 | 10473 | from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
from tcga_encoder.algorithms import *
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("talk")
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import tensorflow as tf
from tcga_encoder.models.svd.batcher_dna_out import *
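# Builds train/validation/test feed dictionaries from the batcher, then runs training followed by a fill (imputation) pass.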
def train( algo_dict, data_dict, logging_dict, results_dict ):
# -------------------------------------------------- #
# SET-UP NETWORK'S PARAMS #
# -------------------------------------------------- #
cb_info = OrderedDict()
batcher = algo_dict[BATCHER]
train_feed_imputation = batcher.TrainBatch()
test_feed_imputation = batcher.TestBatch()
val_feed_imputation = batcher.ValBatch()
cb_info[TEST_FEED_IMPUTATION] = test_feed_imputation
cb_info[VAL_FEED_IMPUTATION] = val_feed_imputation
cb_info[TRAIN_FEED_IMPUTATION] = train_feed_imputation
# -------------------------------------------------- #
# TRAIN #
# -------------------------------------------------- #
batcher.Train( train_feed_imputation )
batcher.TestFill2( cb_info )
def add_variables( var_dict, data_dict ):
  # add the dataset-specific dimensions required by the architecture:
var_dict["dna_dim"] = data_dict['dataset'].GetDimension("DNA")
var_dict["meth_dim"] = data_dict['dataset'].GetDimension("METH")
var_dict["rna_dim"] = data_dict['dataset'].GetDimension("RNA")
var_dict["tissue_dim"] = data_dict['dataset'].GetDimension("TISSUE")
def load_architecture( arch_dict, data_dict ):
add_variables( arch_dict[VARIABLES], data_dict )
return arch_dict[NETWORK]( arch_dict, data_dict)
# def load_architectures( arches, data ):
# networks = OrderedDict()
# for arch in arches:
# networks[ arch[NAME] ] = load_architecture( arch, data )
# return networks
######################################################################################################
if __name__ == "__main__":
assert len(sys.argv) >= 2, "Must pass yaml file."
yaml_file = sys.argv[1]
print "Running: ",yaml_file
y = load_yaml( yaml_file)
#print y
logging_dict = {}
#print "Loading data"
load_data_from_dict( y[DATA] )
algo_dict = y[ALGORITHM]
arch_dict = y[ARCHITECTURE]
data_dict = y[DATA] #{N_TRAIN:4000}
logging_dict = y[LOGGING]
logging_dict[SAVEDIR] = os.path.join( HOME_DIR, os.path.join( logging_dict[LOCATION], logging_dict[EXPERIMENT] ) )
# #networks = load_architectures( y[ARCHITECTURES], y[DATA] )
#add_variables( arch_dict[VARIABLES], data_dict )
#network = load_architecture( arch_dict, data_dict )
network_name = arch_dict[NAME]
#
# make BATCHER and reassign it to dict
algo_dict[BATCHER] = algo_dict[BATCHER]( network_name, None, data_dict, algo_dict, arch_dict, logging_dict )
algo_dict[BATCHER].network_name = network_name
#algo_dict[BATCHER].network = network
sess = tf.InteractiveSession()
results_dict = {}
train( algo_dict, data_dict, logging_dict, results_dict )
batcher = algo_dict[BATCHER]
model_store = algo_dict[BATCHER].model_store
latent_store = algo_dict[BATCHER].latent_store
epoch_store = algo_dict[BATCHER].epoch_store
data_store = algo_dict[BATCHER].data_store
fill_store = algo_dict[BATCHER].fill_store
model_store.open()
data_store.open()
latent_store.open()
epoch_store.open()
fill_store.open()
# TEST FILL for all TARGETS
rna_test = data_store["/RNA/FAIR"].loc[ batcher.test_barcodes ]
dna_0_test = data_store["/DNA/channel/0"].loc[ batcher.test_barcodes ]
#dna_1_test = data_store["/DNA/channel/1"].loc[ batcher.test_barcodes ]
#dna_2_test = data_store["/DNA/channel/2"].loc[ batcher.test_barcodes ]
#dna_3_test = data_store["/DNA/channel/3"].loc[ batcher.test_barcodes ]
meth_test = data_store["/METH/FAIR"].loc[ batcher.test_barcodes ]
tissue_test = data_store["/CLINICAL/TISSUE"].loc[ batcher.test_barcodes ]
rna_train = data_store["/RNA/FAIR"].loc[ batcher.train_barcodes ]
dna_0_train = data_store["/DNA/channel/0"].loc[ batcher.train_barcodes ]
#dna_1_train = data_store["/DNA/channel/1"].loc[ batcher.train_barcodes ]
#dna_2_train = data_store["/DNA/channel/2"].loc[ batcher.train_barcodes ]
#dna_3_train = data_store["/DNA/channel/3"].loc[ batcher.train_barcodes ]
meth_train = data_store["/METH/FAIR"].loc[ batcher.train_barcodes ]
tissue_train = data_store["/CLINICAL/TISSUE"].loc[ batcher.train_barcodes ]
epoch_store.open()
print "TEST:"
print epoch_store["Test_Error"]
print "VAL:"
print epoch_store["Val_Error"]
# other_barcodes = np.setdiff1d( data_store["/RNA/FAIR"].index, np.union1d(batcher.train_barcodes,batcher.test_barcodes))
# rna_other = data_store["/RNA/FAIR"].loc[ other_barcodes ]
# dna_0_other = data_store["/DNA/channel/0"].loc[ other_barcodes ]
# dna_1_other = data_store["/DNA/channel/1"].loc[ other_barcodes ]
# dna_2_other = data_store["/DNA/channel/2"].loc[ other_barcodes ]
# dna_3_other = data_store["/DNA/channel/3"].loc[ other_barcodes ]
# meth_other = data_store["/METH/FAIR"].loc[ other_barcodes ]
# tissue_other = data_store["/CLINICAL/TISSUE"].loc[ other_barcodes ]
#
# inputs_combos = ["RNA","DNA","METH","RNA+DNA","RNA+METH","DNA+METH","RNA+DNA+METH"]
# targets = OrderedDict()
# targets[RNA] = {"observed":rna_test, "error":"mse"}
# targets[METH] = {"observed":meth_test, "error":"mse"}
# targets[DNA+"/0"] = {"observed":dna_0_test, "error":"auc"}
# targets[DNA+"/1"] = {"observed":dna_1_test, "error":"auc"}
# targets[DNA+"/2"] = {"observed":dna_2_test, "error":"auc"}
# targets[DNA+"/3"] = {"observed":dna_3_test, "error":"auc"}
#
# print "==================================="
# print " ERROR "
# print "==================================="
# for target_source, values in targets.iteritems():
# print "++++ %s"%target_source
# observed = values["observed"].values
# for inputs in inputs_combos:
# predicted = fill_store["/Fill/%s/%s"%(target_source,inputs)].values
#
# if values["error"] == "mse":
# error = np.mean( np.square( observed-predicted ) )
# print "%s\t%10s\t%0.6f"%(target_source, inputs, error )
# elif values["error"] == "auc":
#
# p_flattened = predicted.flatten()
# o_flattened = observed.flatten()
#
# error = roc_auc_score(o_flattened,p_flattened)
# print "%s\t%10s\t%0.6f"%(target_source, inputs, error )
#
#
# print "==================================="
# print " LOGLIK "
# print "==================================="
# for target_source, values in targets.iteritems():
# print "++++ %s"%target_source
# #observed = values["observed"].values
# for inputs in inputs_combos:
# predicted = fill_store["/Loglik/%s/%s"%(target_source,inputs)].values
#
# error = np.mean( np.sum( predicted, 1 ) )
# print "%s\t%10s\t%0.6f"%(target_source, inputs, error )
#print model_store
# print data_store
# print latent_store
# print epoch_store
# def violinplot( data, order_list, prior_mean, prior_std, x=None, y=None, orient = "v", sort = True, lims = (-15,15), width=0.95,lw=0.5 ):
# n = len(prior_mean)
# if sort is True:
# i_order = np.argsort( prior_mean )
# else:
# i_order = np.arange( n, dtype=int )
#
#   if orient == "v":
# pp.fill_between(np.arange(n), prior_mean[i_order]+2*prior_std[i_order], prior_mean[i_order]-2*prior_std[i_order], color='black', alpha=0.25)
# pp.plot(prior_mean[i_order], 'k-')
# sns.violinplot( x=x, y=y, data = data, width=width, linewidth=lw, order=order_list[i_order], orient=orient )
# #sns.swarmplot( x=x, y=y, data = data, linewidth=lw, order=order_list[i_order], orient=orient )
# pp.ylim(lims)
# else:
#
# pp.fill_betweenx(np.arange(n), prior_mean[i_order]+2*prior_std[i_order], prior_mean[i_order]-2*prior_std[i_order], color='black', alpha=0.25)
# pp.plot(prior_mean[i_order], np.arange( n, dtype=int ), 'k-')
# sns.violinplot( x=y, y=x, data = data, width=width, linewidth=lw, order=order_list[i_order], orient=orient )
# pp.xlim(lims)
#
#
#
# pp.xlabel("")
# pp.ylabel("")
# #pp.title(tissue)
# pp.grid('on')
# def plot_z( z, test_tissue, use_columns, fill_store ):
# pp.figure()
#
# pp.title( "Z%d"%(z))
#
# rec_means = []
# rec_stds = []
# gen_means = []
# gen_stds = []
# z_rec_df = fill_store["/Z/rec/mu"]["z%d"%z]
# z_rec_df["Tissue"] = pd.Series( [], index=z_rec_df.index)
# for tissue in use_columns:
# query = test_tissue[tissue].values==1.0
# barcodes = test_tissue[tissue][query].index
#
# # rec_means.append( fill_store["/Z/rec/mu"]["z%d"%z].loc[barcodes].values )
# # rec_stds.append( np.sqrt(fill_store["/Z/rec/var"]["z%d"%z].loc[barcodes].values) )
# # gen_means.append( fill_store["/Z/gen/mu"]["z%d"%z].loc[barcodes].values )
# # gen_stds.append( np.sqrt(fill_store["/Z/gen/var"]["z%d"%z].loc[barcodes].values) )
#
# rec_means = pd.DataFrame( np.array(rec_means), columns = use_columns )
# rec_stds = pd.DataFrame( np.array(rec_stds), columns = use_columns )
# gen_means = pd.DataFrame( np.array(gen_means), columns = use_columns )
# gen_stds = pd.DataFrame( np.array(gen_stds), columns = use_columns )
#
# #violinplot(rec_means, z_list, prior_mu_z, prior_std_z, orient=orientation, sort=False, lims=lims)
# #sns.violinplot( x=None, y=None, data = rec_means, width=width, linewidth=lw, order=order_list[i_order], orient=orient )
# sns.violinplot( x=None, y=None, data = rec_means, width=width, linewidth=lw, order=order_list[i_order], orient=orient )
#
# test_tissue = data_store["/CLINICAL/TISSUE"].loc[batcher.test_barcodes]
# most_common_order = np.argsort(-test_tissue.values.sum(0))
# use_ids = pp.find( test_tissue.values.sum(0) > 1 )
# use_columns = test_tissue.columns[use_ids]
#
# n_z = batcher.n_z
# for z in range(2):
# plot_z( z, test_tissue, use_columns, fill_store )
model_store.close()
data_store.close()
latent_store.close()
epoch_store.close()
| mit |
eclee25/flu-SDI-exploratory-age | scripts/popstat_v5-21-13.py | 1 | 1757 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 5/21/13
###Function:
## 1. draw a histogram of the populations and analyze because the definitions of urban and rural need to be redefined to fit zipcode prefix areas. Census defines urban and rural for cities, towns and villages, but this does not fit the zipcode prefix area (urbanized areas: 50,000 or more; urban clusters: 2,500 to less than 50,000; rural: fewer than 2,500)
## 2. instead of an urban-rural x-axis, split the x-axis into population-size bins
###Import data: popstat_by_zip3_2010.csv
###Command Line: python OR_popstat_v5-21-13.py
##############################################
### notes ###
### packages ###
import matplotlib
import csv
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
## local packages ##
### data structures ###
zip3, popstat = [],[]
### parameters ###
### functions ###
### import data ###
popin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/SQL_export/popstat_by_zip3_2010.csv','r')
pop=csv.reader(popin, delimiter=',')
### program ###
for row in pop:
zip3.append(row[1])
popstat.append(float(row[2]))
zip3.pop()
popstat.pop() # remove last value of each list because it represents the total population across all zip3s
# histogram of data
n, bins, patches = plt.hist(popstat, 25, normed = 1, histtype='bar')
plt.xlabel('popstat')
plt.ylabel('density')
plt.show()
# display deciles, quantiles
# 0%: 0.0
# 25%: 105,774.5
# 50%: 202,375
# 75%: 432,941.5
# 100%: 3,003,916
quants = np.percentile(popstat, [10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
print quants # 48395.8, 83765.6, 126030, 167033.4, 202375, 267947.4, 365125.4, 513930.8, 779568.4, 3003916
| mit |
keans/dstools | setup.py | 1 | 1265 | from setuptools import setup, find_packages
import codecs
import os
# get current directory
here = os.path.abspath(os.path.dirname(__file__))
def get_long_description():
"""
get long description from README.rst file
"""
with codecs.open(os.path.join(here, "README.rst"), "r", "utf-8") as f:
return f.read()
setup(
name='dstools',
version='0.0.17',
description='Set of tools for reoccurring data science tasks.',
long_description=get_long_description(),
url='https://keans.de',
author='Ansgar Kellner',
author_email='keans@gmx.de',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='python packaging',
packages=find_packages(
exclude=['contrib', 'docs', 'tests']
),
install_requires=[
"requests", "ujson", "numpy", "scipy", "sklearn",
"inflect", "nltk", "beautifulsoup4"
],
)
| mit |
RobertABT/heightmap | build/matplotlib/examples/user_interfaces/gtk_spreadsheet.py | 13 | 2463 | #!/usr/bin/env python
"""
Example of embedding matplotlib in an application and interacting with
a treeview to store data. Double click on an entry to update plot
data
"""
import pygtk
pygtk.require('2.0')
import gtk
from gtk import gdk
import matplotlib
matplotlib.use('GTKAgg') # or 'GTK'
from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from numpy.random import random
from matplotlib.figure import Figure
class DataManager(gtk.Window):
numRows, numCols = 20,10
data = random((numRows, numCols))
def __init__(self):
gtk.Window.__init__(self)
self.set_default_size(600, 600)
self.connect('destroy', lambda win: gtk.main_quit())
self.set_title('GtkListStore demo')
self.set_border_width(8)
vbox = gtk.VBox(False, 8)
self.add(vbox)
label = gtk.Label('Double click a row to plot the data')
vbox.pack_start(label, False, False)
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
sw.set_policy(gtk.POLICY_NEVER,
gtk.POLICY_AUTOMATIC)
vbox.pack_start(sw, True, True)
model = self.create_model()
self.treeview = gtk.TreeView(model)
self.treeview.set_rules_hint(True)
# matplotlib stuff
fig = Figure(figsize=(6,4))
self.canvas = FigureCanvas(fig) # a gtk.DrawingArea
vbox.pack_start(self.canvas, True, True)
ax = fig.add_subplot(111)
self.line, = ax.plot(self.data[0,:], 'go') # plot the first row
self.treeview.connect('row-activated', self.plot_row)
sw.add(self.treeview)
self.add_columns()
self.add_events(gdk.BUTTON_PRESS_MASK |
gdk.KEY_PRESS_MASK|
gdk.KEY_RELEASE_MASK)
def plot_row(self, treeview, path, view_column):
ind, = path # get the index into data
points = self.data[ind,:]
self.line.set_ydata(points)
self.canvas.draw()
def add_columns(self):
for i in range(self.numCols):
column = gtk.TreeViewColumn('%d'%i, gtk.CellRendererText(), text=i)
self.treeview.append_column(column)
def create_model(self):
types = [float]*self.numCols
store = gtk.ListStore(*types)
for row in self.data:
store.append(row)
return store
manager = DataManager()
manager.show_all()
gtk.main()
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/sparse/test_combine_concat.py | 1 | 18879 | import itertools
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
import pandas.util.testing as tm
class TestSparseArrayConcat:
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_basic(self, kind):
a = pd.SparseArray([1, 0, 0, 2], kind=kind)
b = pd.SparseArray([1, 0, 2, 2], kind=kind)
result = pd.SparseArray._concat_same_type([a, b])
# Can't make any assertions about the sparse index itself
        # since we don't merge sparse blocks across arrays
# in to_concat
expected = np.array([1, 2, 1, 2, 2], dtype="int64")
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_uses_first_kind(self, kind):
other = "integer" if kind == "block" else "block"
a = pd.SparseArray([1, 0, 0, 2], kind=kind)
b = pd.SparseArray([1, 0, 2, 2], kind=other)
result = pd.SparseArray._concat_same_type([a, b])
expected = np.array([1, 2, 1, 2, 2], dtype="int64")
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesConcat:
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_concat(self, kind):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
sparse2 = pd.SparseSeries(val2, name="y", kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
sparse1 = pd.SparseSeries(val1, fill_value=0, name="x", kind=kind)
sparse2 = pd.SparseSeries(val2, fill_value=0, name="y", kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
def test_concat_axis1(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name="x")
sparse2 = pd.SparseSeries(val2, name="y")
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
exp = pd.SparseDataFrame(exp)
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
def test_concat_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ["integer", "block"]:
sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
sparse2 = pd.SparseSeries(val2, name="y", kind=kind, fill_value=0)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name="x")
sparse2 = pd.SparseSeries(val2, name="y", fill_value=0)
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_different_kind(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name="x", kind="integer")
sparse2 = pd.SparseSeries(val2, name="y", kind="block")
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=sparse1.kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=sparse2.kind)
tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_concat_sparse_dense(self, kind):
# use first input's fill_value
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse = pd.SparseSeries(val1, name="x", kind=kind)
dense = pd.Series(val2, name="y")
res = pd.concat([sparse, dense])
exp = pd.SparseSeries(pd.concat([pd.Series(val1), dense]), kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.Series(pd.SparseArray(exp, kind=kind), index=exp.index, name=exp.name)
tm.assert_series_equal(res, exp)
sparse = pd.SparseSeries(val1, name="x", kind=kind, fill_value=0)
dense = pd.Series(val2, name="y")
res = pd.concat([sparse, dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.concat([pd.Series(val1), dense])
exp = pd.Series(
pd.SparseArray(exp, kind=kind, fill_value=0), index=exp.index, name=exp.name
)
tm.assert_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
# XXX: changed from SparseSeries to Series[sparse]
exp = pd.Series(
pd.SparseArray(exp, kind=kind, fill_value=0), index=exp.index, name=exp.name
)
tm.assert_series_equal(res, exp)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestSparseDataFrameConcat:
def setup_method(self, method):
self.dense1 = pd.DataFrame(
{
"A": [0.0, 1.0, 2.0, np.nan],
"B": [0.0, 0.0, 0.0, 0.0],
"C": [np.nan, np.nan, np.nan, np.nan],
"D": [1.0, 2.0, 3.0, 4.0],
}
)
self.dense2 = pd.DataFrame(
{
"A": [5.0, 6.0, 7.0, 8.0],
"B": [np.nan, 0.0, 7.0, 8.0],
"C": [5.0, 6.0, np.nan, np.nan],
"D": [np.nan, np.nan, np.nan, np.nan],
}
)
self.dense3 = pd.DataFrame(
{
"E": [5.0, 6.0, 7.0, 8.0],
"F": [np.nan, 0.0, 7.0, 8.0],
"G": [5.0, 6.0, np.nan, np.nan],
"H": [np.nan, np.nan, np.nan, np.nan],
}
)
def test_concat(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse()
res = pd.concat([sparse, sparse])
exp = pd.concat([self.dense1, self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse2])
exp = pd.concat([self.dense2, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
# fill_value = 0
sparse = self.dense1.to_sparse(fill_value=0)
sparse2 = self.dense2.to_sparse(fill_value=0)
res = pd.concat([sparse, sparse])
exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse2])
exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
def test_concat_different_fill_value(self):
# 1st fill_value will be used
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse(fill_value=0)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
res = pd.concat([sparse, sparse2])
exp = pd.concat([self.dense1, self.dense2]).to_sparse()
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
res = pd.concat([sparse2, sparse])
exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
def test_concat_different_columns_sort_warns(self):
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse()
# stacklevel is wrong since we have two FutureWarnings,
# one for depr, one for sorting.
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
):
res = pd.concat([sparse, sparse3])
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
):
exp = pd.concat([self.dense1, self.dense3])
exp = exp.to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
def test_concat_different_columns(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse()
res = pd.concat([sparse, sparse3], sort=True)
exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
res = pd.concat([sparse3, sparse], sort=True)
exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse()
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp, check_kind=False)
def test_concat_bug(self):
from pandas.core.sparse.api import SparseDtype
x = pd.SparseDataFrame({"A": pd.SparseArray([np.nan, np.nan], fill_value=0)})
y = pd.SparseDataFrame({"B": []})
res = pd.concat([x, y], sort=False)[["A"]]
exp = pd.DataFrame(
{"A": pd.SparseArray([np.nan, np.nan], dtype=SparseDtype(float, 0))}
)
tm.assert_frame_equal(res, exp)
def test_concat_different_columns_buggy(self):
sparse = self.dense1.to_sparse(fill_value=0)
sparse3 = self.dense3.to_sparse(fill_value=0)
res = pd.concat([sparse, sparse3], sort=True)
exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(
res, exp, check_kind=False, consolidate_block_indices=True
)
res = pd.concat([sparse3, sparse], sort=True)
exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(
res, exp, check_kind=False, consolidate_block_indices=True
)
# different fill values
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse(fill_value=0)
# each columns keeps its fill_value, thus compare in dense
res = pd.concat([sparse, sparse3], sort=True)
exp = pd.concat([self.dense1, self.dense3], sort=True)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
res = pd.concat([sparse3, sparse], sort=True)
exp = pd.concat([self.dense3, self.dense1], sort=True)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_series(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse2 = self.dense2.to_sparse()
for col in ["A", "D"]:
res = pd.concat([sparse, sparse2[col]])
exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
res = pd.concat([sparse2[col], sparse])
exp = pd.concat([self.dense2[col], self.dense1]).to_sparse()
tm.assert_sp_frame_equal(res, exp, check_kind=False)
# fill_value = 0
sparse = self.dense1.to_sparse(fill_value=0)
sparse2 = self.dense2.to_sparse(fill_value=0)
for col in ["C", "D"]:
res = pd.concat([sparse, sparse2[col]])
exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(
res, exp, check_kind=False, consolidate_block_indices=True
)
res = pd.concat([sparse2[col], sparse])
exp = pd.concat([self.dense2[col], self.dense1]).to_sparse(fill_value=0)
exp["C"] = res["C"]
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(
res, exp, consolidate_block_indices=True, check_kind=False
)
def test_concat_axis1(self):
# fill_value = np.nan
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse()
res = pd.concat([sparse, sparse3], axis=1)
exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse()
tm.assert_sp_frame_equal(res, exp)
res = pd.concat([sparse3, sparse], axis=1)
exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse()
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# fill_value = 0
sparse = self.dense1.to_sparse(fill_value=0)
sparse3 = self.dense3.to_sparse(fill_value=0)
res = pd.concat([sparse, sparse3], axis=1)
exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
res = pd.concat([sparse3, sparse], axis=1)
exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# different fill values
sparse = self.dense1.to_sparse()
sparse3 = self.dense3.to_sparse(fill_value=0)
# each columns keeps its fill_value, thus compare in dense
res = pd.concat([sparse, sparse3], axis=1)
exp = pd.concat([self.dense1, self.dense3], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
res = pd.concat([sparse3, sparse], axis=1)
exp = pd.concat([self.dense3, self.dense1], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
@pytest.mark.parametrize(
"fill_value,sparse_idx,dense_idx",
itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0]),
)
def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx):
frames = [self.dense1, self.dense2]
sparse_frame = [
frames[dense_idx],
frames[sparse_idx].to_sparse(fill_value=fill_value),
]
dense_frame = [frames[dense_idx], frames[sparse_idx]]
# This will try both directions sparse + dense and dense + sparse
for _ in range(2):
res = pd.concat(sparse_frame)
exp = pd.concat(dense_frame)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
sparse_frame = sparse_frame[::-1]
dense_frame = dense_frame[::-1]
@pytest.mark.parametrize(
"fill_value,sparse_idx,dense_idx",
itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0]),
)
@pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=True)
def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx):
# See GH16874, GH18914 and #18686 for why this should be a DataFrame
from pandas.core.dtypes.common import is_sparse
frames = [self.dense1, self.dense3]
sparse_frame = [
frames[dense_idx],
frames[sparse_idx].to_sparse(fill_value=fill_value),
]
dense_frame = [frames[dense_idx], frames[sparse_idx]]
# This will try both directions sparse + dense and dense + sparse
for _ in range(2):
res = pd.concat(sparse_frame, axis=1)
exp = pd.concat(dense_frame, axis=1)
cols = [i for (i, x) in enumerate(res.dtypes) if is_sparse(x)]
for col in cols:
exp.iloc[:, col] = exp.iloc[:, col].astype("Sparse")
for column in frames[dense_idx].columns:
if dense_idx == sparse_idx:
tm.assert_frame_equal(res[column], exp[column])
else:
tm.assert_series_equal(res[column], exp[column])
tm.assert_frame_equal(res, exp)
sparse_frame = sparse_frame[::-1]
dense_frame = dense_frame[::-1]
| apache-2.0 |
arjoly/scikit-learn | examples/ensemble/plot_feature_transformation.py | 9 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show() | bsd-3-clause |
BreakVoid/DL_Project | Torch/data_regenerate/data_utils.py | 1 | 12273 | import os
import copy
import scipy.interpolate as spi
import math
import numpy as np
import matplotlib.pyplot as plt
import random
from scipy.optimize import curve_fit
data_root = 'toneclassifier'
train_data_path = "%s/train" % data_root
val_data_path = "%s/test" % data_root
test_data_path = "%s/test_new" % data_root
def SetPath(root):
global data_root, train_data_path, val_data_path, test_data_path
data_root = root
train_data_path = "%s/train" % data_root
val_data_path = "%s/test" % data_root
test_data_path = "%s/test_new" % data_root
labels = {
'one': 0,
'two': 1,
'three': 2,
'four': 3
}
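# Loads paired <name>.engy / <name>.f0 files from each tone sub-directory and returns (Engy, F0, y) with integer labels.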
def LoadData(mode='train'):
data_path = train_data_path
if mode == 'val':
data_path = val_data_path
elif mode == 'test':
data_path = test_data_path
Engy = []
F0 = []
y = []
for labelName, label in labels.iteritems():
data_subset_path = "%s/%s" % (data_path, labelName)
data_names = set()
for filename in os.listdir(data_subset_path):
if filename[0] == ".":
continue
if ".engy" in filename:
data_names.add(filename[0:-5])
elif ".f0" in filename:
data_names.add(filename[0:-3])
for data_name in data_names:
engy = map(float, open("%s/%s.engy" % (data_subset_path, data_name)).readlines())
f0 = map(float, open("%s/%s.f0" % (data_subset_path, data_name)).readlines())
Engy.append(engy)
F0.append(f0)
y.append(label)
return Engy, F0, y
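# Zeroes out F0 frames whose sqrt-energy does not exceed the mean sqrt-energy of the unvoiced (zero-F0) frames
# (at least 1.0), treating them as background noise.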
def IgnoreLowEnergyFrequence(Engy, F0):
data_num = len(Engy)
if data_num != len(F0):
raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
resEngy = []
resF0 = []
for i in xrange(data_num):
engy = copy.copy(Engy[i])
f0 = copy.copy(F0[i])
data_len = len(engy)
if data_len != len(f0):
raise ValueError("the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
zero_freq_engy_sum = 0.0
zero_freq_count = 0.0
for j in xrange(data_len):
if f0[j] < 1e-4:
zero_freq_count += 1
zero_freq_engy_sum += math.sqrt(engy[j])
mean_engy = zero_freq_engy_sum / zero_freq_count
for j in xrange(data_len):
if math.sqrt(engy[j]) <= max(mean_engy, 1.0):
f0[j] = 0.0
resEngy.append(engy)
resF0.append(f0)
return resEngy, resF0
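# Trims leading and trailing unvoiced (zero-F0) frames so each sequence starts and ends on a voiced frame.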
def TrimData(Engy, F0):
data_num = len(Engy)
if data_num != len(F0):
raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
resEngy = []
resF0 = []
for i in xrange(data_num):
engy = copy.copy(Engy[i])
f0 = copy.copy(F0[i])
data_len = len(engy)
if data_len != len(f0):
raise ValueError("the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
start = None
end = None
for i in xrange(len(f0)):
if f0[i] > 1e-5:
start = i
break
for i in xrange(len(f0) - 1, -1, -1):
if f0[i] > 1e-5:
end = i + 1
break
resEngy.append(copy.copy(engy[start:end]))
resF0.append(copy.copy(f0[start:end]))
return resEngy, resF0
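# Converts F0 from Hz to the mel scale: mel = 1127 * ln(1 + f / 700).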
def TransformToMelFrequencyScale(F0):
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
for j in xrange(data_len):
f0[j] = 1127 * math.log(1 + f0[j] / 700)
resF0.append(f0)
return resF0
def DivSingleDataStd(F0):
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
f0arr = np.asarray(f0)
std = f0arr.std()
f0arr = f0arr / std
for j in xrange(data_len):
f0[j] = f0arr[j]
resF0.append(f0)
return resF0
def DivDataStd(F0):
data_num = len(F0)
resF0 = []
tmp = []
for i in xrange(data_num):
for j in xrange(len(F0[i])):
tmp.append(F0[i][j])
F0arr = np.asarray(tmp)
std = F0arr.std()
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
for j in xrange(data_len):
f0[j] = f0[j] / std
resF0.append(f0)
return resF0
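# Heuristic F0 smoothing: corrects octave (halving/doubling) jumps, repairs outlier frames with forward and
# backward passes controlled by the thresholds C1/C2, and finishes with a 5-point moving average.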
def SmoothF0(F0):
C1 = 0.16
C2 = 0.4
data_num = len(F0)
resF0 = []
for i in xrange(data_num):
f0 = copy.copy(F0[i])
data_len = len(f0)
for j in xrange(1, data_len):
if abs(f0[j] - f0[j - 1]) < C1:
continue
if abs(f0[j] / 2 - f0[j - 1]) < C1:
f0[j] /= 2
elif abs(2 * f0[j] - f0[j - 1]) < C1:
f0[j] *= 2
ff0 = copy.copy([f0[0]] + f0 + [f0[-1]])
fff0 = copy.copy(ff0)
data_len = len(ff0)
f0_2 = (ff0[0], ff0[0])
for j in xrange(1, data_len - 1):
if abs(ff0[j] - ff0[j - 1]) > C1 and abs(ff0[j + 1] - ff0[j - 1]) > C2:
ff0[j] = 2 * f0_2[1] - f0_2[0]
elif abs(ff0[j] - ff0[j - 1]) > C1 and abs(ff0[j + 1] - ff0[j - 1]) <= C2:
ff0[j] = (ff0[j - 1] + ff0[j + 1]) / 2
f0_2 = (f0_2[1], ff0[j])
res_f0 = None
if abs(ff0[-1] - fff0[-1]) <= C1:
res_f0 = ff0
else:
f0_2 = (fff0[-1], fff0[-1])
for j in xrange(data_len - 2, 0, -1):
if abs(fff0[j] - fff0[j + 1]) > C1 and abs(fff0[j - 1] - fff0[j + 1]) > C2:
fff0[j] = 2 * f0_2[1] - f0_2[0]
elif abs(fff0[j] - fff0[j + 1]) > C1 and abs(fff0[j - 1] - fff0[j + 1]) <= C2:
fff0[j] = (fff0[j - 1] + fff0[j + 1]) / 2
f0_2 = (f0_2[1], fff0[j])
s = 0
for j in xrange(data_len - 2, 0, -1):
if abs(fff0[j] - ff0[j]) < C1:
s = j
break
res_f0 = ff0[: s + 1] + fff0[s + 1: ]
res_f0 = [res_f0[0]] + res_f0 + [res_f0[-1]]
data_len = len(res_f0)
for j in xrange(2, data_len - 2):
res_f0[j] = (res_f0[j - 2] + res_f0[j - 1] + res_f0[j] + res_f0[j + 1] + res_f0[j + 2]) / 5.0
resF0.append(res_f0[2:-2])
return resF0
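# Resamples every energy/F0 sequence to a common length (default 200) with cubic-spline interpolation.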
def NormalizeDataLengthWithInterpolation(Engy, F0, result_len=200):
data_num = len(Engy)
if data_num != len(F0):
raise ValueError("the number of input data mismatched. len(Engy)==%d and len(F0)==%d" % (len(Engy), len(F0)))
resEngy = []
resF0 = []
for i in xrange(data_num):
engy = copy.copy(Engy[i])
f0 = copy.copy(F0[i])
data_len = len(engy)
if data_len != len(f0):
raise ValueError(
"the length of %d-th data mismatched. len(engy)==%d and len(f0)==%d" % (i, len(engy), len(f0)))
k = float(result_len - 1) / float(data_len - 1)
x = [i * k for i in xrange(data_len)]
newX = [i * 1.0 for i in xrange(result_len)]
newX[-1] = x[-1]
new_engy = spi.interp1d(x, engy, kind='cubic')(newX)
new_f0 = spi.interp1d(x, f0, kind='cubic')(newX)
resEngy.append(new_engy)
resF0.append(new_f0)
return resEngy, resF0
def CenterlizeSingleData(data):
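    # Scale the series by its mean, then subtract the mean of the scaled series
    # so the result is centred around zero.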
mean = np.asarray(data).mean()
for i in xrange(len(data)):
data[i] /= mean
mean = np.asarray(data).mean()
for i in xrange(len(data)):
data[i] -= mean
return data
def CenterlizeData(Data):
for i in xrange(len(Data)):
Data[i] = CenterlizeSingleData(Data[i])
return Data
def SaveData(Engy, F0, y, mode='train'):
save_engy_name = 'train_engys'
save_f0_name = 'train_f0s'
save_y_name = 'train_labels'
if mode == 'val':
save_engy_name = 'val_engys'
save_f0_name = 'val_f0s'
save_y_name = 'val_labels'
elif mode == 'test':
save_engy_name = 'test_engys'
save_f0_name = 'test_f0s'
save_y_name = 'test_labels'
engy_file = open(save_engy_name, "w")
f0_file = open(save_f0_name, "w")
y_file = open(save_y_name, "w")
data_num = len(Engy)
if data_num != len(F0) or data_num != len(y):
raise ValueError("the number of data mismatched, Engy:%d, F0:%d, y:%d" % (len(Engy), len(F0), len(y)))
for i in xrange(data_num):
engy_file.write("%s\n" % (' '.join(map(lambda x: "%.5f" % x, Engy[i]))))
f0_file.write("%s\n" % (' '.join(map(lambda x: "%.5f" % x, F0[i]))))
y_file.write("%d\n"% y[i])
engy_file.close()
f0_file.close()
y_file.close()
def PlotF0(mode='train', F0=None, y=None):
max_len = max(map(len, F0))
for label in xrange(4):
for i in xrange(len(F0)):
if (y[i] != label):
continue
coff = float(max_len - 1) / (len(F0[i]) - 1)
x = np.arange(0, len(F0[i]), 1)
x = coff * x
fx = np.asarray(F0[i])
plt.plot(x, fx)
plt.savefig('%s-plt_%d' % (mode, label))
plt.clf()
def Amplify(Data, times):
for i in xrange(len(Data)):
for j in xrange(len(Data[i])):
Data[i][j] *= times
return Data
def unison_shuffled_copies(a, b):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def order_two_f(x, a, b, c):
return a * x**2 + b * x + c
# def FitMissPoint(F0):
# data_num = len(F0)
# resF0 = []
# for i in xrange(data_num):
# f0 = F0[i]
# data_len = len(f0)
# f0arr = np.asarray(f0)
# minimal = min(f0arr)
# x = []
# y = []
# for j in xrange(data_len):
# if f0[j] > minimal + 0.05:
# x.append(j)
# y.append(f0[j])
# popt, pcov = curve_fit(order_two_f, x, y)
# for j in xrange(data_len):
# if f0[j] <= minimal + 0.05:
# f0[j] = order_two_f(j, popt[0], popt[1], popt[2])
# resF0.append(f0)
# return resF0
# def FitMissPoint(F0):
# data_num = len(F0)
# resF0 = []
# for i in xrange(data_num):
# f0 = F0[i]
# data_len = len(f0)
# f0arr = np.asarray(f0)
# mean = f0arr.mean()
# x = []
# y = []
# for j in xrange(data_len):
# if f0[j] > 0.4 * mean:
# x.append(j)
# y.append(f0[j])
# popt, pcov = curve_fit(order_two_f, x, y)
# for j in xrange(data_len):
# if f0[j] <= 0.4 * mean:
# f0[j] = order_two_f(j, popt[0], popt[1], popt[2])
# resF0.append(f0)
# return resF0
def fitting(f0):
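    # Walk the contour left to right; whenever a point jumps by more than 8x the
    # previous step, replace it with a prediction from a quadratic fit to the
    # points accepted so far, damped by a cumulative decay_rate factor.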
data_len = len(f0)
resf0 = []
x = []
y = []
last1 = f0[0]
last2 = f0[1]
x.append(0)
y.append(f0[0])
x.append(1)
y.append(f0[1])
x.append(2)
y.append(f0[2])
resf0.append(f0[0])
resf0.append(f0[1])
resf0.append(f0[2])
decay_rate = 0.98
rates = 1
for j in xrange(data_len - 3):
k = j + 3
if (abs(f0[k] - last2) > 8 * abs(last2 - last1)):
popt, pcov = curve_fit(order_two_f, x, y)
f0[k] = order_two_f(k, popt[0], popt[1], popt[2])
rates = rates * decay_rate
f0[k] *= rates
resf0.append(f0[k])
x.append(k)
y.append(f0[k])
last1 = last2
last2 = f0[k]
return resf0
def FitMissPoint(F0):
resF0 = []
data_num = len(F0)
for i in xrange(data_num):
        # build a reversed copy of the contour so the fit can also be run from the far end
        rev = list(F0[i])
        rev.reverse()
u = fitting(F0[i])
v = fitting(rev)
v.reverse()
w = []
for j in xrange(len(u)):
w.append(u[j] + v[j])
resF0.append(w)
return resF0
def AddWhiteNoise(F0):
data_num = len(F0)
for i in xrange(data_num):
data_len = len(F0[i])
for j in xrange(data_len):
F0[i][j] += np.random.normal(0, 0.01)
return F0
| mit |
JsNoNo/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have an full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_cont_5_fullelines/fullgrid/postergrid_backup.py | 1 | 9761 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine; will be called later
numplots = 16
fig = plt.figure()
def add_sub_plot(sub_num):
ax = fig.add_subplot(4,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = ax.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = ax.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
ax.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
ax.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
ax.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 10
ax.set_ylim(yt_min,yt_max)
ax.set_xlim(xt_min,xt_max)
ax.set_yticks(arange(yt_min+1,yt_max,2),fontsize=10)
ax.set_xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
    if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
        ax.tick_params(labelleft = 'off')
    else:
        ax.tick_params(labelleft = 'on')
    if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
        ax.tick_params(labelbottom = 'off')
    else:
        ax.tick_params(labelbottom = 'on')
if sub_num == 1:
ax.set_yticks(arange(yt_min+1,yt_max+1,2),fontsize=10)
if sub_num == 9:
ax.set_yticks(arange(yt_min,yt_max,2),fontsize=10)
ax.set_xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 12:
ax.set_xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [0, #977
3, #1034
18, #1549
27, #2297
33, #2798
37, #Ne V 3426
43, #3869
52, #4363
58, #4861
59, #O III 4959
60, #O 3 5007
63, #O 1 5577
69, #H 1 6563
70, #N 2 6584
97, #C II 158
101] #O I 63
#105] #O I 88
#create z array for this plot with given lines
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty IR Lines", fontsize=14)
# ---------------------------------------------------
ax = fig.add_subplot(111)
ax.set_xlabel('Log($n _{\mathrm{H}} $)')
ax.set_ylabel('Log ($ \phi _{\mathrm{H}} $)')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
for i in range(numplots):
    add_sub_plot(i + 1)
ax1 = fig.add_subplot(4, 4, 1)
add_patches(ax1)
print "figure complete"
#plt.savefig('Dusty_Near_IR.pdf')
#plt.clf()
plt.show()
print "figure saved"
| gpl-2.0 |
jmmease/pandas | pandas/core/missing.py | 6 | 23369 | """
Routines for filling missing data
"""
import numpy as np
from distutils.version import LooseVersion
from pandas._libs import algos, lib
from pandas.compat import range, string_types
from pandas.core.dtypes.common import (
is_numeric_v_string_like,
is_float_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_scalar,
is_integer,
needs_i8_conversion,
_ensure_float64)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
return mask
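# Illustrative behaviour (a sketch, not part of the public pandas API): for
# arr = np.array([1.0, 2.0, np.nan]) and values_to_mask = [2.0, np.nan],
# mask_missing returns array([False, True, True]) -- the 2.0 is matched by the
# elementwise comparison and the NaN entry by the isna() branch above.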
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, 'asfreq']:
return None
if isinstance(method, string_types):
method = method.lower()
if method == 'ffill':
method = 'pad'
elif method == 'bfill':
method = 'backfill'
valid_methods = ['pad', 'backfill']
expecting = 'pad (ffill) or backfill (bfill)'
if allow_nearest:
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
msg = ('Invalid fill method. Expecting {expecting}. Got {method}'
.format(expecting=expecting, method=method))
raise ValueError(msg)
return method
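# For example, clean_fill_method('ffill') normalizes to 'pad' and
# clean_fill_method('bfill') to 'backfill'; anything outside the valid set
# raises the ValueError above.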
def clean_interp_method(method, **kwargs):
order = kwargs.get('order')
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh',
'piecewise_polynomial', 'pchip', 'akima', 'spline',
'from_derivatives']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method))
return method
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
    take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which cant be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = ('Invalid limit_direction: expecting one of {valid!r}, '
'got {invalid!r}.')
raise ValueError(msg.format(valid=valid_limit_directions,
invalid=limit_direction))
from pandas import Series
ys = Series(yvalues)
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
# violate_limit is a list of the indexes in the series whose yvalue is
# currently NaN, and should still be NaN after the interpolation.
# Specifically:
#
# If limit_direction='forward' or None then the list will contain NaNs at
# the beginning of the series, and NaNs that are more than 'limit' away
# from the prior non-NaN.
#
# If limit_direction='backward' then the list will contain NaNs at
# the end of the series, and NaNs that are more than 'limit' away
# from the subsequent non-NaN.
#
# If limit_direction='both' then the list will contain NaNs that
# are more than 'limit' away from any non-NaN.
#
# If limit=None, then use default behavior of filling an unlimited number
# of NaNs in the direction specified by limit_direction
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError('Limit must be an integer')
elif limit < 1:
raise ValueError('Limit must be greater than 0')
# each possible limit_direction
# TODO: do we need sorted?
if limit_direction == 'forward' and limit is not None:
violate_limit = sorted(start_nans |
set(_interp_limit(invalid, limit, 0)))
elif limit_direction == 'forward':
violate_limit = sorted(start_nans)
elif limit_direction == 'backward' and limit is not None:
violate_limit = sorted(end_nans |
set(_interp_limit(invalid, 0, limit)))
elif limit_direction == 'backward':
violate_limit = sorted(end_nans)
elif limit_direction == 'both' and limit is not None:
violate_limit = sorted(_interp_limit(invalid, limit, limit))
else:
violate_limit = []
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
result = yvalues.copy()
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[violate_limit] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima']
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order, **kwargs)
result[violate_limit] = np.nan
return result
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
    the list in clean_interp_method
"""
try:
from scipy import interpolate
# TODO: Why is DatetimeIndex being imported here?
from pandas import DatetimeIndex # noqa
except ImportError:
raise ImportError('{method} interpolation requires SciPy'
.format(method=method))
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'from_derivatives': _from_derivatives,
'piecewise_polynomial': _from_derivatives,
}
if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype('i8'), new_x.astype('i8')
if method == 'pchip':
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError("Your version of Scipy does not support "
"PCHIP interpolation.")
elif method == 'akima':
try:
from scipy.interpolate import Akima1DInterpolator # noqa
alt_methods['akima'] = _akima_interpolate
except ImportError:
raise ImportError("Your version of Scipy does not support "
"Akima interpolation.")
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
# GH #10633
if not order:
raise ValueError("order needs to be specified and greater than 0")
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
import scipy
from scipy import interpolate
if LooseVersion(scipy.__version__) < '0.18.0':
try:
method = interpolate.piecewise_polynomial_interpolate
return method(xi, yi.reshape(-1, 1), x,
orders=order, der=der)
except AttributeError:
pass
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
from scipy import interpolate
try:
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
except TypeError:
# Scipy earlier than 0.17.0 missing axis
P = interpolate.Akima1DInterpolator(xi, yi)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
""" perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64, np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'pad_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_1d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'backfill_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_1d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'pad_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_2d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'backfill_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_2d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def fill_zeros(result, x, y, name, fill):
"""
if this is a reversed op, then flip x,y
if we have an integer value (or array in y)
and we have 0's, fill them with the fill,
return the result
mask the nan's from x
"""
if fill is None or is_float_dtype(result):
return result
if name.startswith(('r', '__r')):
x, y = y, x
is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y):
if (y == 0).any():
# GH 7325, mask and nans must be broadcastable (also: PR 9308)
# Raveling and then reshaping makes np.putmask faster
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype('float64', copy=False).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it correctly
# (GH 6178 and PR 9308)
if np.isinf(fill):
signs = np.sign(y if name.startswith(('r', '__r')) else x)
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
if "floordiv" in name: # (PR 9308)
nan_mask = ((y == 0) & (x == 0)).ravel()
np.putmask(result, nan_mask, np.nan)
result = result.reshape(shape)
return result
def _interp_limit(invalid, fw_limit, bw_limit):
"""Get idx of values that won't be filled b/c they exceed the limits.
This is equivalent to the more readable, but slower
.. code-block:: python
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
    # 2. subtract the returned indices from N - 1
N = len(invalid)
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = (set(np.where(windowed)[0] + limit) |
set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0]))
return idx
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit == 0:
# then we don't even need to care about backwards, just use forwards
return f_idx
else:
b_idx = set(N - 1 - np.asarray(list(inner(invalid[::-1], bw_limit))))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| gpl-2.0 |
huig-/Computational-Geometry | Curves/convex_hull.py | 1 | 3087 | from __future__ import division
import math
import numpy as np
from operator import itemgetter
import matplotlib.pyplot as plt
class ConvexHull:
#AUXILIARY FUNCTIONS
def _compute_angle(self, o, x, y):
ox = (x[0]-o[0], x[1]-o[1])
xy = (y[0]-x[0], y[1]-x[1])
norm_xy = np.linalg.norm(xy)
if norm_xy == 0:
return 6.5
arcos = np.dot(ox, xy)/np.linalg.norm(ox)/norm_xy
if arcos > 1: arcos = 1.0
elif arcos < -1: arcos = -1.0
return math.acos(arcos)
"""
Three points are a counter-clock-wise turn if ccw > 0, clockwise if ccw < 0 and collinear if ccw = 0. ccw is a determinant that gives twice the signed area of the triangle formed by p1, p2 and p3.
"""
def _ccw(self, p1, p2, p3):
return (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0])
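    # Quick illustrative check: _ccw((0,0), (1,0), (1,1)) == 1 > 0, a
    # counter-clockwise turn; swapping the last two points gives -1 (clockwise).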
#MAIN FUNCTIONS
def __call__(self, points, method='andrew'):
if method == 'gift wrapping':
return self.gift_wrapping(points)
elif method == 'graham scan':
return self.graham_scan(points)
elif method == 'andrew':
return self.andrew_hull(points)
#elif method == 'quickhull':
# return None
def gift_wrapping(self, L):
points = np.array(L)
i0 = np.argmin(points[:,1]); i = i0
v = (points[i][0]-1, points[i][1])
hull = [points[i]]
while(True):
k = np.argmin([self._compute_angle(v, points[i], p) for p in np.concatenate((points[0:i], points[i+1:]))])
if k >= i:
k = k + 1
hull.append(points[k])
if i0 == k:
break
i = k
v = hull[-2]
return np.array(hull)
def graham_scan(self, points):
P = np.array(sorted(points, key=itemgetter(0), reverse=True))
P = np.array(sorted(P, key=itemgetter(1)))
angles = [self._compute_angle((P[0][0]-1,P[0][1]), P[0], p) for p in P[1:]]
sorted_angles = [i[0] + 1 for i in sorted(enumerate(angles), key=lambda x:x[1])]
S = [P[0], P[sorted_angles[0]]]
i = 1
while i < len(points) - 1:
if len(S) == 1:
S.append(P[sorted_angles[i]])
i = i + 1
elif (self._ccw(P[sorted_angles[i]], S[-2], S[-1]) > 0):
S.append(P[sorted_angles[i]])
i = i + 1
else:
S.pop()
S.append(S[0]) #for plotting purposes only
return np.array(S)
def andrew_hull(self, points):
P = sorted(points)
L_upper = [P[0], P[1]]
L_lower = [P[-1], P[-2]]
n = len(points)
for i in xrange(2, n):
L_upper.append(P[i])
L_lower.append(P[n-i-1])
while len(L_upper) > 2 and self._ccw(L_upper[-3], L_upper[-2], L_upper[-1]) >= 0:
L_upper.pop(-2)
while len(L_lower) > 2 and self._ccw(L_lower[-3], L_lower[-2], L_lower[-1]) >= 0:
L_lower.pop(-2)
#L_lower.pop(-1);
L_lower.pop(0)
L_upper.extend(L_lower)
return L_upper
def plot(self, points, hull):
points_np = np.array(points)
hull_np = np.array(hull)
x_lim = max(np.absolute(points_np[:,0])) + 2
y_lim = max(np.absolute(points_np[:,1])) + 2
fig = plt.figure()
ax = fig.add_subplot(111, aspect=1)
ax.set_xlim(-x_lim, x_lim)
ax.set_ylim(-y_lim, y_lim)
plt.plot(points_np[:,0], points_np[:,1], color='blue', marker='o', linestyle='None')
plt.plot(hull_np[:,0], hull_np[:,1])
fig.canvas.draw()
plt.show()
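# Illustrative usage (hypothetical point set, a sketch rather than part of the
# original script):
#   hull = ConvexHull()
#   pts = [(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)]
#   hull(pts, method='andrew')
#   -> [(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]  (the interior point is dropped
#      and the hull is returned as a closed loop)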
| gpl-3.0 |
HowDoIUseThis/AGNClassification | utils/analysis/NeIII_line_fitting.py | 1 | 4402 | from utils.filters import general
from utils.graphing import graph
from tqdm import tqdm_notebook
import numpy as np
import pandas as pd
import itertools
def calculate_best_fit_parameters(dataframe, parameterMeshDensity):
"""Determines the best parameters for the Ne III fit line of a specific shape/
Fit line :
has the form: [O_III/H_Beta] = alpha + beta/([Ne_III/O_II] + gamma)
Parameters:
-------------
dataframe : dataframe object
this data frame should only contain already processed data
parameterMeshDensity : int
        The number of different values tested for each parameter.
        This value exponentially increases computation time for little improvement.
    Returns
    -------------
    parameterDF : pandas.DataFrame
        DataFrame of the successively better parameter sets [alpha, beta, gamma]
        found during the search, with their fit strengths and weights.
"""
dataframe = general.signal_noise_weights(dataframe)
    # These spans can be changed, but it might result in an odd fit
alpha_span = np.linspace(-1, 2, num=parameterMeshDensity)
beta_span = np.linspace(0, 2, num=parameterMeshDensity)
gamma_span = np.linspace(-1, 1, num=parameterMeshDensity)
parameterMeshGrid = make_parameter_grid(
alpha_span, beta_span, gamma_span)
best_fit_parameters, parameter_list = determine_best_fit(
dataframe, parameterMeshGrid)
# plots results to insure the result is reasonable
plot = graph.Ne_III(dataframe, best_fit_parameters[0],
best_fit_parameters[1], best_fit_parameters[2])
print("Best parameter: {}" .format(best_fit_parameters))
labels = ['Alpha', 'Beta', 'Gamma', 'Fit Strength',
'AboveLine(AGN_Weight)', 'belowFit(SF_Weight)']
parameterDF = pd.DataFrame.from_records(parameter_list, columns=labels)
print(parameterDF)
return parameterDF
def make_parameter_grid(*args):
ones = np.ones([len(arg) for arg in args])
midx = [(ix * ones)[None] for ix in np.ix_(*args)]
idx = np.concatenate(midx).reshape((len(args), -1)).transpose()
return idx
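# Illustrative output: make_parameter_grid([0, 1], [2, 3]) yields the rows
# [0, 2], [0, 3], [1, 2], [1, 3] -- every combination of the input spans.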
def determine_best_fit(dataframe, paraMeshGrid):
"""This function does all the actaul calcutions for the best Ne III parameters.
Parameters:
-------------
dataframe : dataframe object
this data frame should only contain already processed data
paraMeshGrid : array
this array is the mesh grid constructed to pass all permutations to
this function
    Returns
    -------------
    paraMeshGrid[best_para_indx] : array
        The parameter set [alpha, beta, gamma] determined to be the best.
    listOfParaAndWeights : list
        Tuples of (alpha, beta, gamma, fit strength, AGN weight, SF weight)
        recorded each time a better parameter set was found.
"""
max_differential = 0
best_para_indx = 0
largestWeightAboveFit = 0
df = dataframe
listOfParaAndWeights = []
for indx, para_set in enumerate(tqdm_notebook(paraMeshGrid)):
alpha, beta, gamma = para_set
mod_gamma = -gamma
elementsAboveFit = ((df['O_III/H_Beta'] -
df['Ne_III/O_II'].map(lambda x: alpha + beta / (x + gamma)) >= 0) | (df['Ne_III/O_II'] > mod_gamma))
aboveFitDF = df[elementsAboveFit]
try:
aboveFitWeight = aboveFitDF[(
'BPT:AGN_Weight')].sum()
# This allows the loop to skip any extraneous computation
# if aboveFitWeight > largestWeightAboveFit:
# largestWeightAboveFit = aboveFitWeight
# else:
# continue
elementsBelowFit = ((df['O_III/H_Beta'] -
df['Ne_III/O_II'].map(lambda x: alpha + beta / (x + gamma)) < 0) & (df['Ne_III/O_II'] < mod_gamma))
belowFitDF = df[elementsBelowFit]
belowFitWeight = belowFitDF[(
'BPT:SF_Weight')].sum()
#print("AGN: {} SF: {}".format(aboveFitWeight, belowFitWeight))
except ZeroDivisionError as err:
continue
if (aboveFitWeight + belowFitWeight) > max_differential:
max_differential = (aboveFitWeight + belowFitWeight)
best_para_indx = indx
listOfParaAndWeights.append(
(alpha, beta, gamma, max_differential, aboveFitWeight, belowFitWeight))
return paraMeshGrid[best_para_indx], listOfParaAndWeights
| gpl-3.0 |
ShujiaHuang/AsmVar | src/AsmvarGenotype/Validate.py | 2 | 2087 | """
========================================
Summarise allele counts (AC) for PASS variants in an AsmVar-genotyped VCF,
print them per site, and plot their distribution.
========================================
"""
import os
import sys
import re
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig ( alleleCount ) :
fig = plt.figure()
if len( alleleCount ) > 0 :
plt.title('AC Number', fontsize=12)
plt.hist(alleleCount , 60, histtype='bar', normed=1, facecolor = 'c', color=['c'] )
plt.ylabel('#', fontsize=12)
figureFile = 'AC'
fig.savefig(figureFile + '.png')
fig.savefig(figureFile + '.pdf')
def main ( argv ) :
vcfInfile = argv[0]
if vcfInfile[-3:] == '.gz' :
I = os.popen( 'gzip -dc %s' % vcfInfile )
else :
I = open ( vcfInfile )
alleleCount = []
while 1 :
lines = I.readlines(100000)
if not lines : break;
for line in lines :
# S
line = line.strip( '\n' )
if re.search(r'^#', line) : continue
col = line.split()
if not re.search( r'^PASS', col[6]) : continue
format = {}
for i,t in enumerate( col[8].split(':') ) : format[t] = i # Get Format
for type in ['RR', 'VT', 'VS'] :
if type not in format.keys() :
print >> sys.stderr, '[ERROR] The format of VCF file is not right which you input, it did not contian %s field' % type
sys.exit(1)
svsize = re.search(r';SVSIZE=([^;]+)', col[7])
svtype = re.search(r';SVTYPE=([^;]+)', col[7])
factor = re.search(r';F=([^;]+)', col[7])
factor = string.atof( factor.group(1) )
ac = 0
for sample in col[9:] :
gt = sample.split(':')[0]
gnt= gt.split('/')
if len(gnt) < 2 : gnt = gt.split('|')
ac += ( string.atoi(gnt[0]) + string.atoi(gnt[1]) )
print col[0],'\t',col[1], '\t', col[6], '\t', ac
alleleCount.append(ac)
I.close()
DrawFig ( alleleCount )
if __name__ == '__main__' :
main(sys.argv[1:])
| mit |
sm-github/bzrflag | bzagents/plotPf.py | 1 | 2132 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
from pFields import Pfield
class Tank(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.flag = 'die'
pass
def limiter (value):
if value < -10:
return -10
if value > 10:
return 10
return value
if __name__ == '__main__':
# 1 - red, 2 - green, 3 - blue, 4 - purple
TARGET = '1'
MAP_NAME = '../maps/four_ls.bzw'
#MAP_NAME = '../maps/rotated_box_world.bzw'
pFields = Pfield('3', MAP_NAME)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.axis([-400,400,-400,400])
# generate grid
x=np.linspace(-400, 400, 30)
y=np.linspace(-400, 400, 30)
# x, y=np.meshgrid(x, y)
base = pFields.bases[TARGET]
'''
# plot attractive field
for r in range(0, len(x)):
for c in range(0, len(y)):
tank = Tank(x[r], y[c])
newX, newY = pFields.attractive(tank, base)
ax.arrow(x[r], y[c], limiter(newX), limiter(newY), head_width=4, head_length=6)
'''
circ = plt.Circle((base.x, base.y), base.r, fc='r')
plt.gca().add_patch(circ)
#plot repulsive or tangential field
for r in range(0, len(x)):
for c in range(0, len(y)):
tank = Tank(x[r], y[c])
#newX, newY = pFields.attractive(tank, base)
newX, newY = pFields.repulsive(tank, base)
ax.arrow(x[r], y[c], limiter(newX), limiter(newY), head_width=4, head_length=6)
for o in pFields.obstacles:
plt.gca().add_patch(plt.Circle((o.x, o.y), o.r, fc='g'))
'''
#plot the combined fields
for r in range(0, len(x)):
for c in range(0, len(y)):
tank = Tank(x[r], y[c])
newX, newY = pFields.attractive(tank, base)
repX, repY = pFields.repulsive(tank, base)
ax.arrow(x[r], y[c], limiter(newX+ repX), limiter(newY + repY), head_width=4, head_length=6)
'''
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()
plt.savefig('./plots/attrField.png')
| gpl-3.0 |
rueckstiess/dopamine | scripts/cube_logreg.py | 1 | 1604 | # demonstrate the logistic regression classifier on a dataset
# that has 8 classes, represented by point clouds at each corner
# of a 3d cube (input dim = 3). after starting, the script shows
# convergence to the correct classes after each round of training.
from dopamine.classifier import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
def cubeData(nPointsPerClass=100, nRandomFeatures=3):
points = np.random.random(size=(nPointsPerClass * 8, nRandomFeatures + 4))
for x in range(2):
for y in range(2):
for z in range(2):
cls = int(str(x)+str(y)+str(z), 2)
# normally distributed point cloud around the corner
points[cls*nPointsPerClass:(cls+1)*nPointsPerClass, :3] = np.random.normal([x, y, z], 0.1, [nPointsPerClass,3])
# class value
points[cls*nPointsPerClass:(cls+1)*nPointsPerClass, -1] = cls
points = np.random.permutation(points)
return points
data = cubeData(20, 0)
lr = LogisticRegression(3, 8)
colors = ['red', 'green', 'blue', 'yellow', 'black', 'cyan', 'magenta', 'gray']
plt.ion()
fig = plt.figure()
for i in range(25):
print i
plt.clf()
ax = fig.add_subplot(111, projection='3d')
# visualize
for d in data:
pcls = lr.classify(d[:-1])
ax.scatter([d[0]], [d[1]], [d[2]], color=colors[pcls])
plt.gcf().canvas.draw()
time.sleep(1)
# learn
for d in np.random.permutation(data):
lr.update(d[:-1], d[-1])
plt.ioff()
plt.show() | gpl-3.0 |
sibis-platform/sibispy | bulk_operations.py | 2 | 4826 | """
Generalized mechanism for mass-setting a FIELD to a particular VALUE.
"""
import pandas as pd
import redcap as rc
from six import string_types
from typing import Union, List, Dict
def bulk_mark(redcap_api: rc.Project, field_name: Union[List, str],
value: str, records_df: pd.DataFrame,
upload_individually: bool = False) -> Dict:
"""
Workhorse bulk-marking function.
If applied to repeating instruments, `records_df` must already have valid
`redcap_repeat_instrument` and `redcap_repeat_instance`.
"""
upload = records_df.copy(deep=True)
# upload.loc[:, field_name] = value
if isinstance(field_name, string_types):
assignments = {field_name: value}
else:
assignments = dict(zip(field_name, [value] * len(field_name)))
# Might need to create multiple new columns with the same value, which
# cannot be done with bare .loc if the columns don't already exist. This is
# the simplest way to do it quickly. (If field_name is str, then this is
# equivalent to upload.assign(field_name=value).)
upload = upload.assign(**assignments)
# TODO: Should probably wrap this in a try block, since we're not even
# checking existence of variables and not uploading records one at a time?
if upload_individually:
outcomes = []
for idx, _ in upload.iterrows():
outcome = redcap_api.import_records(upload.loc[[idx]])
outcomes.append(outcome)
else:
outcomes = redcap_api.import_records(upload)
return outcomes
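# Illustrative call (names are hypothetical, not part of this module): given
# api = rc.Project(api_url, api_token) and a records_df indexed by the
# project's primary key, bulk_mark(api, 'demographics_complete', '2',
# records_df) would mark that field as complete for every listed record.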
def get_status_fields_for_form(redcap_api, form_name):
"""
Return completeness and (if available) missingness field names in a form.
If form_name doesn't exist in the project data dictionary, raise NameError.
Returns a dict with 'completeness' and 'missingness' keys.
"""
datadict = redcap_api.export_metadata(format='df').reset_index()
form_datadict = datadict.loc[datadict['form_name'] == form_name, :]
if form_datadict.empty:
raise NameError('{}: No such form in selected API!'.format(form_name))
field_names = {'completeness': form_name + '_complete'}
missing_field_name = form_datadict.loc[
form_datadict['field_name'].str.endswith('_missing'),
'field_name'] # FIXME: What type does this return?
# TODO: Throw error if multiple fields are found?
try:
field_names.update({'missingness': missing_field_name.item()})
except ValueError: # no value available
pass
return field_names
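# For a hypothetical form named 'demographics' whose data dictionary defines a
# 'demographics_missing' field, this would return
# {'completeness': 'demographics_complete', 'missingness': 'demographics_missing'}.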
def read_targets(redcap_api, from_file):
"""
Convert file to a columnless DataFrame indexed by Redcap primary keys.
If primary keys for the project are not present, raises AssertionError.
"""
targets = pd.read_csv(from_file)
index_cols = [redcap_api.def_field]
assert redcap_api.def_field in targets.columns
if redcap_api.is_longitudinal():
assert 'redcap_event_name' in targets.columns
index_cols.append('redcap_event_name')
out_cols = index_cols.copy()
if 'redcap_repeat_instrument' in targets.columns:
out_cols.extend(['redcap_repeat_instrument', 'redcap_repeat_instance'])
# If the file contains any other columns, strip them - don't want to add
# them to the later upload
targets = targets[out_cols].drop_duplicates()
# Return a DataFrame with *only* appropriate indexing. This is necessary
# for redcap.Project.import_records() to work properly once the variable of
# interest is set for the DataFrame.
#
# (If multiple Redcap primary keys are standard columns, the MultiIndex is
# wrongly assigned by .import_records() and the upload fails.)
targets.set_index(index_cols, inplace=True)
return targets
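# Example input (hypothetical longitudinal project whose primary key is
# 'study_id'): the CSV must contain at least the columns
#   study_id,redcap_event_name
# plus, optionally, redcap_repeat_instrument and redcap_repeat_instance for
# repeating instruments; any other columns are stripped before indexing.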
def bulk_mark_status(redcap_api, form_name, missingness, completeness,
records_df, verbose=False):
"""
Courtesy function to bulk-mark completeness and missingness.
Returns a tuple of import outcomes for completeness and missingness upload,
respectively.
Relies on argparse to provide valid completness and missingness values.
"""
field_names = get_status_fields_for_form(redcap_api, form_name)
comp_results = None
miss_results = None
# In either case, only set the status if it has been passed
if completeness is not None:
comp_results = bulk_mark(redcap_api, field_names['completeness'],
completeness, records_df)
if missingness is not None:
if not field_names.get('missingness'):
raise TypeError('Missingness cannot be set for selected form!')
else:
miss_results = bulk_mark(redcap_api, field_names['missingness'],
missingness, records_df)
return (comp_results, miss_results)
| bsd-3-clause |
ssaeger/scikit-learn | examples/model_selection/plot_learning_curve.py | 33 | 4505 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_iter=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
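# Illustrative sketch (not part of the original example): a hypothetical,
# uncalled helper showing the shapes returned by learning_curve, which
# plot_learning_curve then averages over the CV axis. All names below are
# assumptions for illustration only.
def _learning_curve_shapes_demo():
    digits_demo = load_digits()
    X_demo, y_demo = digits_demo.data, digits_demo.target
    cv_demo = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    sizes, tr_scores, te_scores = learning_curve(
        GaussianNB(), X_demo, y_demo, cv=cv_demo,
        train_sizes=np.linspace(.1, 1.0, 5))
    # sizes has shape (5,); tr_scores and te_scores have shape (5, n_splits),
    # which is why the plotting helper takes means and stds along axis=1.
    return sizes.shape, tr_scores.shape, te_scores.shape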
| bsd-3-clause |
LynnCo/galaxySim | old/create_data.py | 1 | 6100 | #sim.py
import math
import time
import numpy
import matplotlib
import matplotlib.pyplot as plt
from custom import sortkeys
from custom import rotate
from custom import make_sphere
class emitters (object):
def __init__ (self,x=0,y=0,v=0,m=0):
self.x = x; self.y = y
self.v = v; self.m = m
def emit(self,data):
x = self.x; y = self.y
v = self.v; m = self.m
data[x,y] = add_mass(data[x,y],m,v,v)
self.move()
return data
def move(self):
self.x,self.y = rotate(self.x,self.y,math.pi/64)
self.x = round(self.x)
self.y = round(self.y)
return 0
def fill (inp,size=100):
for x in range(-size,size):
for y in range(-size,size):
inp[x,y] = dict(m=0,vx=0,vy=0)
return inp
def diffuse (data,sphere,size):
spreads = list()
dataout = dict()
dataout = fill(dataout,size)
for x,y in data.keys():
m = data[x,y]["m"]
vx = data[x,y]["vx"]
vy = data[x,y]["vy"]
spreadinst = dict()
if not m: pass
elif m<1: spreadinst[x,y] = dict(vx=vx,vy=vy,m=m)
else:
#print("large mass")
pil = dict()
sumdist = 0
for p,d in sphere.items():
px,py = p[0]+x,p[1]+y
try:
sumdist += d
data[px,py]
pil[px,py] = d
except KeyError: pass
#print("pil ",pil)
for p,d in pil.items():
px,py = p[0],p[1]
div = d/sumdist
spreadinst[px,py] = dict(vx=vx,vy=vy,m=m*div)
#print("spread ",spreadinst)
spreads.append(spreadinst)
del spreadinst
for spreadinst in spreads:
for p,val in spreadinst.items():
x,y = p[0],p[1]
m = val["m"]
vx = val["vx"]
vy = val["vy"]
dataout[x,y] = add_mass(dataout[x,y],m,vx,vy)
return dataout
def move (data,size,ejected_mass):
dataout = dict()
dataout = fill(dataout,size)
for x,y in data.keys():
m = data[x,y]["m"]
if not m: continue
vx = data[x,y]["vx"]
vy = data[x,y]["vy"]
xn = round(x+vx)
yn = round(y+vy)
try: dataout[xn,yn] = add_mass(dataout[xn,yn],m,vx,vy)
except KeyError: ejected_mass += m; print("offmap")
return dataout, ejected_mass  # also return the accumulator; ints are not mutated in place
def add_mass (data,m_n,vx_n,vy_n):
m = data["m"]
if not m:
data["m"] = m_n
data["vx"] = vx_n
data["vy"] = vy_n
else:
vx = data["vx"]
vy = data["vy"]
data["m"] = m + m_n
data["vx"] = (vx*m + vx_n*m_n)/data["m"]
data["vy"] = (vy*m + vy_n*m_n)/data["m"]
return data
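# Illustrative sketch (not in the original script): a hypothetical, uncalled
# check of the mass-weighted velocity average used by add_mass. Merging a cell
# of m=2 at vx=1 with an incoming m_n=1 at vx_n=4 should give m=3 and
# vx = (2*1 + 1*4)/3 = 2.0.
def _add_mass_example():
    cell = dict(m=2, vx=1.0, vy=0.0)
    cell = add_mass(cell, 1, 4.0, 0.0)
    assert cell["m"] == 3 and abs(cell["vx"] - 2.0) < 1e-12
    return cell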
def c_o_m (data):
t_mass = 0
w_pos = [0,0]
for k,v in data.items():
t_mass += v["m"]
w_pos[0] += k[0]*v["m"]
w_pos[1] += k[1]*v["m"]
if t_mass:
x = round(w_pos[0]/t_mass)
y = round(w_pos[1]/t_mass)
center = [x,y]
return center,t_mass
else: return [0,0],0
def gravitate (data,min_dist=9,sphere=0):
fakeG = 0.02
if sphere:
min_dist = 1
center,t_mass = c_o_m(data)
cx = center[0]
cy = center[1]
for x,y in data.keys():
if not data[x,y]["m"]: continue
if sphere:
local_mass = dict()
for px,py in sphere.keys():
try:
pix = x+px; piy = y+py
lmass = data[pix,piy]["m"]
local_mass[pix,piy] = dict()
local_mass[pix,piy]["m"] = lmass
except KeyError: pass
center,t_mass = c_o_m(local_mass)
cx = center[0]
cy = center[1]
if len(local_mass) == 1: continue
dh = math.hypot(x-cx,y-cy)
if dh<min_dist: continue
acc = fakeG*t_mass/(dh**2)
data[x,y]["vx"] += -(x-cx)*acc/(dh)
data[x,y]["vy"] += -(y-cy)*acc/(dh)
return data
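# Illustrative sketch (not in the original script): a hypothetical, uncalled
# example of a single gravitate update. With fakeG=0.02, a total mass of 1000
# at the origin and a cell at (20, 0): dh=20, acc=0.02*1000/20**2=0.05, and the
# velocity change is -(x-cx)*acc/dh = -0.05 along x (toward the center), 0 along y.
def _gravitate_example():
    fakeG, t_mass = 0.02, 1000
    x, y, cx, cy = 20, 0, 0, 0
    dh = math.hypot(x - cx, y - cy)
    acc = fakeG * t_mass / (dh ** 2)
    return -(x - cx) * acc / dh, -(y - cy) * acc / dh  # (-0.05, 0.0)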
def print_atts (data,tag,min_mass=0):
mass = 0
print("\n",tag,"\n")
for k in sortkeys(data):
v = data[k]
if v["m"]>min_mass: print(k,v)
mass += v["m"]
print()
center,mass = c_o_m(data)
print("mass = ",round(mass))
print("C.O.M = ",center,"\n")
def write_to_ndarray (data,size=0):
npdata = numpy.zeros(shape=(size*2,size*2))
for k,v in data.items():
x = k[0] + abs(size)
y = k[1] + abs(size)
npdata[x,y] = v["m"]
return npdata
def start ():
### [CONDITIONS] ###
size = 100
small = 10
frames = 200
em1 = emitters(x=size-small*3,y=0,v=0,m=10)
em2 = emitters(x=-size+small*3,y=0,v=0,m=10)
### [/CONDITIONS] ##
diff_sphere = make_sphere(math.ceil(small*2))
grav_sphere = make_sphere(math.ceil(small*1.2))
data = dict()
data = fill(data,size)
tstart = time.time()
npframes = list()
ejected_mass = 0
for i in range(frames+1):
print("step ",i)
#print_atts(data,("step ",i),min_mass=1)
data = em1.emit(data)
data = em2.emit(data)
data = gravitate(data,min_dist=small-1)#global
data = gravitate(data,sphere=grav_sphere)#local
data, ejected_mass = move(data,size=size,ejected_mass=ejected_mass)
data = diffuse(data,sphere=diff_sphere,size=size)
npframes.append(write_to_ndarray(data,size))
center,t_mass = c_o_m(data)
info = list(["---Simulation Log File---\n",
"Time taken: "+str(round(((time.time()-tstart)/60),1))+"minutes\n",
"\n",
"[Generation params]\n",
"Size: "+str(2*size)+"x by "+str(2*size)+"y\n",
"Frames: "+str(frames)+"\n",
"\n",
"[Results]\n",
"Total mass: "+str(round(t_mass))+"\n",
"Center of mass: "+str(center[0])+"x "+str(center[1])+"y\n",
])
with open("log.txt","w") as logfile:
logfile.writelines(info)
numpy.save("calculations",npframes)
start() | mit |
elijah513/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 143 | 22295 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: check that it does not raise casting
# errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
rsivapr/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 2 | 11993 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import warnings
from sys import version_info
import numpy as np
from scipy import interpolate
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
lasso_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
When validating this against glmnet, notice that glmnet divides the
objective by nobs.
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and l1_ratio.
Actually, the parameters alpha = 0 should not be allowed. However,
we test it as a border case.
ElasticNet is tested with and without precomputed Gram matrix
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
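# Illustrative sketch (not part of the original tests): a hypothetical, uncalled
# helper showing the shapes produced by build_dataset with its defaults: X and
# X_test are (50, 200), y and y_test are (50,), and only the first 10 features
# carry signal.
def _build_dataset_shapes_demo():
    X, y, X_test, y_test = build_dataset()
    return X.shape, y.shape, X_test.shape, y_test.shape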
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Compute the lasso_path
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
coef_path = [e.coef_ for e in lasso_path(X, y, alphas=alphas,
fit_intercept=False)]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
fit_intercept=False,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
np.asarray(coef_path).T, decimal=1)
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
with warnings.catch_warnings():
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
warnings.simplefilter("ignore", UserWarning)
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
clf.fit(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
clf.fit(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 50
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=1.0, max_iter=50, warm_start=True)
clf.fit(X, y)
clf.set_params(alpha=0.1)
clf.fit(X, y)
clf2 = ElasticNet(alpha=0.1, max_iter=500)
clf2.fit(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
check_warnings() # Skip if unsupported Python version
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
clf.fit(X, Y)
assert_greater(len(w), 0) # warnings should be raised
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
zhenv5/scikit-learn | sklearn/metrics/pairwise.py | 49 | 44088 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
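# Illustrative sketch (not part of the original module): a hypothetical, uncalled
# helper checking the dot-product expansion used above against a direct
# computation of sqrt(sum((x - y) ** 2)) on a small random input.
def _euclidean_expansion_check():
    rng = np.random.RandomState(0)
    A = rng.rand(3, 4)
    B = rng.rand(2, 4)
    direct = np.sqrt(((A[:, np.newaxis, :] - B[np.newaxis, :, :]) ** 2).sum(-1))
    return np.allclose(euclidean_distances(A, B), direct)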
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
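# Illustrative sketch (not part of the original module): a hypothetical, uncalled
# helper checking the batched argmin/min above against the naive full-matrix
# computation for the default euclidean metric.
def _argmin_min_naive_check():
    rng = np.random.RandomState(0)
    A = rng.rand(20, 3)
    B = rng.rand(15, 3)
    D = euclidean_distances(A, B)
    idx, vals = pairwise_distances_argmin_min(A, B, batch_size=4)
    return np.array_equal(idx, D.argmin(axis=1)) and np.allclose(vals, D.min(axis=1))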
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
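# Illustrative sketch (not part of the original module): a hypothetical, uncalled
# helper checking the note above, i.e. that the paired cosine distance equals
# 1 minus the cosine similarity of each pair, which is half the squared
# euclidean distance between the unit-normalized rows.
def _paired_cosine_note_check():
    rng = np.random.RandomState(0)
    A = rng.rand(5, 3)
    B = rng.rand(5, 3)
    one_minus_cos = 1. - np.sum(normalize(A) * normalize(B), axis=1)
    return np.allclose(paired_cosine_distances(A, B), one_minus_cos)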
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
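# Illustrative sketch (not part of the original module): a hypothetical, uncalled
# helper checking rbf_kernel against the explicit formula
# exp(-gamma * ||x - y||^2) computed via euclidean_distances.
def _rbf_kernel_formula_check(gamma=0.5):
    rng = np.random.RandomState(0)
    A = rng.rand(4, 3)
    B = rng.rand(5, 3)
    expected = np.exp(-gamma * euclidean_distances(A, B, squared=True))
    return np.allclose(rbf_kernel(A, B, gamma=gamma), expected)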
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
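# Illustrative sketch (not part of the original module): on L2-normalized rows
# cosine_similarity coincides with linear_kernel, as noted in the docstring
# above. Demonstration only; never called at import time.
def _cosine_similarity_example():
    import numpy as np
    X = np.array([[1.0, 0.0], [1.0, 1.0]])
    X_unit = X / np.linalg.norm(X, axis=1, keepdims=True)
    K = cosine_similarity(X)
    assert np.allclose(K, linear_kernel(X_unit, X_unit))
    return K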
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
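# Illustrative sketch (not part of the original module): for strictly positive
# histograms the additive chi-squared kernel is -sum((x - y)**2 / (x + y)) for
# each pair of rows. Demonstration only; never called at import time.
def _additive_chi2_kernel_example():
    import numpy as np
    X = np.array([[0.3, 0.7], [0.5, 0.5]])
    expected = np.array([[-np.sum((x - y) ** 2 / (x + y)) for y in X] for x in X])
    K = additive_chi2_kernel(X)
    assert np.allclose(K, expected)
    return K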
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
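# Illustrative sketch (not part of the original module): chi2_kernel is the
# exponentiated additive kernel, exp(gamma * additive_chi2_kernel(X, Y)).
# Demonstration only; never called at import time.
def _chi2_kernel_example():
    import numpy as np
    X = np.array([[0.3, 0.7], [0.5, 0.5]])
    expected = np.exp(2.0 * additive_chi2_kernel(X))  # gamma=2.0
    K = chi2_kernel(X, gamma=2.0)
    assert np.allclose(K, expected)
    return K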
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
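# Illustrative sketch (not part of the original module): pairwise_distances
# accepts either a built-in metric name or a user-supplied callable that takes
# two 1d arrays. Demonstration only; never called at import time.
def _pairwise_distances_example():
    import numpy as np
    X = np.array([[0.0, 0.0], [3.0, 4.0]])
    D_builtin = pairwise_distances(X, metric="euclidean")
    D_callable = pairwise_distances(
        X, metric=lambda a, b: np.sqrt(np.sum((a - b) ** 2)))
    assert np.allclose(D_builtin, D_callable)  # both are [[0, 5], [5, 0]]
    return D_builtin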
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
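# Illustrative sketch (not part of the original module): pairwise_kernels
# dispatches on the metric name, and filter_params=True drops keyword
# arguments that the chosen kernel does not accept. Demonstration only; never
# called at import time.
def _pairwise_kernels_example():
    import numpy as np
    X = np.array([[1.0, 0.0], [0.0, 1.0]])
    K_rbf = pairwise_kernels(X, metric="rbf", gamma=1.0)
    assert np.allclose(K_rbf, rbf_kernel(X, gamma=1.0))
    # 'linear' takes no parameters, so gamma is filtered out instead of raising
    K_lin = pairwise_kernels(X, metric="linear", filter_params=True, gamma=1.0)
    assert np.allclose(K_lin, linear_kernel(X))
    return K_rbf, K_lin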
| bsd-3-clause |
rs2/pandas | pandas/tests/frame/methods/test_rank.py | 2 | 11377 | from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Series
import pandas._testing as tm
class TestRank:
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
df = DataFrame({"A": s, "B": s})
results = {
"average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
"min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
"max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
"first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
"dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
}
@pytest.fixture(params=["average", "min", "max", "first", "dense"])
def method(self, request):
"""
Fixture for trying all rank methods
"""
return request.param
@td.skip_if_no_scipy
def test_rank(self, float_frame):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
float_frame["C"][::4] = np.nan
float_frame["D"][::5] = np.nan
ranks0 = float_frame.rank()
ranks1 = float_frame.rank(1)
mask = np.isnan(float_frame.values)
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
tm.assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
tm.assert_frame_equal(result, exp)
def test_rank2(self):
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([["b", "c", "a"], ["a", "c", "b"]])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
df = DataFrame([["b", np.nan, "a"], ["a", "c", "b"]])
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
        # ranking datetime data that contains NaN does not work without an extensive workaround
data = [
[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)],
]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2.0, np.nan, 1.0], [2.0, 3.0, 1.0]])
result = df.rank(1, numeric_only=False, ascending=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1.0, np.nan, 2.0], [2.0, 1.0, 3.0]])
result = df.rank(1, numeric_only=False, ascending=False)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]})
exp = DataFrame({"a": [3.5, 1.0, 3.5, 5.0, 6.0, 7.0, 2.0]})
tm.assert_frame_equal(df.rank(), exp)
def test_rank_does_not_mutate(self):
# GH#18521
# Check rank does not mutate DataFrame
df = DataFrame(np.random.randn(10, 3), dtype="float64")
expected = df.copy()
df.rank()
result = df
tm.assert_frame_equal(result, expected)
def test_rank_mixed_frame(self, float_string_frame):
float_string_frame["datetime"] = datetime.now()
float_string_frame["timedelta"] = timedelta(days=1, seconds=1)
result = float_string_frame.rank(1)
expected = float_string_frame.rank(1, numeric_only=True)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_rank_na_option(self, float_frame):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
float_frame["C"][::4] = np.nan
float_frame["D"][::5] = np.nan
# bottom
ranks0 = float_frame.rank(na_option="bottom")
ranks1 = float_frame.rank(1, na_option="bottom")
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = float_frame.rank(na_option="top")
ranks1 = float_frame.rank(1, na_option="top")
fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
fval1 = float_frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = float_frame.rank(na_option="top", ascending=False)
ranks1 = float_frame.rank(1, na_option="top", ascending=False)
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = float_frame.rank(na_option="bottom", ascending=False)
ranks1 = float_frame.rank(1, na_option="bottom", ascending=False)
fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values
fval1 = float_frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
tm.assert_numpy_array_equal(ranks0.values, exp0)
tm.assert_numpy_array_equal(ranks1.values, exp1)
# bad values throw error
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
float_frame.rank(na_option="bad", ascending=False)
# invalid type
with pytest.raises(ValueError, match=msg):
float_frame.rank(na_option=True, ascending=False)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = DataFrame([[2, 1], [4, 3]])
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index"))
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns"))
@td.skip_if_no_scipy
def test_rank_methods_frame(self):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
cols = [chr(ord("z") - i) for i in range(xs.shape[1])]
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
for ax in [0, 1]:
for m in ["average", "min", "max", "first", "dense"]:
result = df.rank(axis=ax, method=m)
sprank = np.apply_along_axis(
rankdata, ax, vals, m if m != "first" else "ordinal"
)
sprank = sprank.astype(np.float64)
expected = DataFrame(sprank, columns=cols).astype("float64")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["O", "f8", "i8"])
def test_rank_descending(self, method, dtype):
if "i" in dtype:
df = self.df.dropna()
else:
df = self.df.astype(dtype)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
tm.assert_frame_equal(res, expected)
if method == "first" and dtype == "O":
return
expected = (df.max() - df).rank(method=method)
if dtype != "O":
res2 = df.rank(method=method, ascending=False, numeric_only=True)
tm.assert_frame_equal(res2, expected)
res3 = df.rank(method=method, ascending=False, numeric_only=False)
tm.assert_frame_equal(res3, expected)
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("dtype", [None, object])
def test_rank_2d_tie_methods(self, method, axis, dtype):
df = self.df
def _check2d(df, expected, method="average", axis=0):
exp_df = DataFrame({"A": expected, "B": expected})
if axis == 1:
df = df.T
exp_df = exp_df.T
result = df.rank(method=method, axis=axis)
tm.assert_frame_equal(result, exp_df)
disabled = {(object, "first")}
if (dtype, method) in disabled:
return
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, self.results[method], method=method, axis=axis)
@pytest.mark.parametrize(
"method,exp",
[
("dense", [[1.0, 1.0, 1.0], [1.0, 0.5, 2.0 / 3], [1.0, 0.5, 1.0 / 3]]),
(
"min",
[
[1.0 / 3, 1.0, 1.0],
[1.0 / 3, 1.0 / 3, 2.0 / 3],
[1.0 / 3, 1.0 / 3, 1.0 / 3],
],
),
(
"max",
[[1.0, 1.0, 1.0], [1.0, 2.0 / 3, 2.0 / 3], [1.0, 2.0 / 3, 1.0 / 3]],
),
(
"average",
[[2.0 / 3, 1.0, 1.0], [2.0 / 3, 0.5, 2.0 / 3], [2.0 / 3, 0.5, 1.0 / 3]],
),
(
"first",
[
[1.0 / 3, 1.0, 1.0],
[2.0 / 3, 1.0 / 3, 2.0 / 3],
[3.0 / 3, 2.0 / 3, 1.0 / 3],
],
),
],
)
def test_rank_pct_true(self, method, exp):
# see gh-15630.
df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]])
result = df.rank(method=method, pct=True)
expected = DataFrame(exp)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.high_memory
def test_pct_max_many_rows(self):
# GH 18271
df = DataFrame(
{"A": np.arange(2 ** 24 + 1), "B": np.arange(2 ** 24 + 1, 0, -1)}
)
result = df.rank(pct=True).max()
assert (result == 1).all()
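# Illustrative sketch (not part of the original test module): the expected
# arrays in TestRank.results above come from Series.rank with the matching
# tie-breaking method. The helper below reproduces two of them and is never
# collected by pytest (no ``test_`` prefix).
def _rank_methods_sketch():
    s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
    dense = s.rank(method="dense").values
    minimum = s.rank(method="min").values
    assert np.allclose(dense, TestRank.results["dense"], equal_nan=True)
    assert np.allclose(minimum, TestRank.results["min"], equal_nan=True)
    return dense, minimum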
| bsd-3-clause |
MycChiu/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kyoren/https-github.com-h2oai-h2o-3 | h2o-py/tests/testdir_misc/pyunit_frame_as_list2.py | 1 | 1040 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def expr_as_list():
iris = h2o.import_file(path=tests.locate("smalldata/iris/iris_wheader.csv"))
# multiple rows and columns
res = 2 - iris
res = h2o.as_list(res, use_pandas=False)
assert abs(float(res[4][0]) - -2.6) < 1e-10 and abs(float(res[5][1]) - -1.6) < 1e-10 and \
abs(float(res[11][2]) - 0.5) < 1e-10, "incorrect values"
# single column
res = 2 - iris
res = h2o.as_list(res[0], use_pandas=False)
assert abs(float(res[4][0]) - -2.6) < 1e-10 and abs(float(res[18][0]) - -3.1) < 1e-10 and \
abs(float(res[25][0]) - -2.8) < 1e-10, "incorrect values"
# local data
frm = h2o.as_list(h2o.H2OFrame(python_obj=[1,2,3]), use_pandas=False)
assert float(frm[1][2]) == 3, "incorrect values"
frm = h2o.as_list(h2o.H2OFrame(python_obj=[[1,2,3], [4,5,6]]), use_pandas=False)
assert float(frm[2][1]) == 5, "incorrect values"
if __name__ == "__main__":
tests.run_test(sys.argv, expr_as_list)
| apache-2.0 |
harisbal/pandas | pandas/tests/frame/test_query_eval.py | 3 | 40886 | # -*- coding: utf-8 -*-
from __future__ import print_function
import operator
import pytest
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import DataFrame, Series, Index, MultiIndex, date_range
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.tests.frame.common import TestData
PARSERS = 'python', 'pandas'
ENGINES = 'python', pytest.param('numexpr', marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != 'pandas':
pytest.skip("cannot evaluate with parser {0!r}".format(parser))
class TestCompat(object):
def setup_method(self, method):
self.df = DataFrame({'A': [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query('A>0')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query('A>0', engine=None)
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine=None)
assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query('A>0', engine='python')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='python')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query('A>0', engine='numexpr')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='numexpr')
assert_series_equal(result, self.expected2, check_names=False)
else:
pytest.raises(ImportError,
lambda: df.query('A>0', engine='numexpr'))
pytest.raises(ImportError,
lambda: df.eval('A+1', engine='numexpr'))
class TestDataFrameEval(TestData):
def test_ops(self):
# tst ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
for n in [4, 4000]:
df = DataFrame(1, index=range(n), columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__')]:
base = (DataFrame(np.tile(m.values, n) # noqa
.reshape(n, -1),
columns=list('abcd')))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result, expected)
# these are commutative
if op in ['+', '*']:
result = getattr(df, op)(m)
assert_frame_equal(result, expected)
# these are not
elif op in ['-', '/']:
result = getattr(df, rop)(m)
assert_frame_equal(result, expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1 - np.isnan(df.iloc[0:25]))
result = (1 - np.isnan(df)).iloc[0:25]
assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'b']})
msg = "expr must be a string to be evaluated"
with tm.assert_raises_regex(ValueError, msg):
df.query(lambda x: x.B == "b")
with tm.assert_raises_regex(ValueError, msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({'A': [1, 2, 3]})
msg = "expr cannot be an empty string"
with tm.assert_raises_regex(ValueError, msg):
df.query('')
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(randn(10, 2), columns=list('ab'))
dict1 = {'a': 1}
dict2 = {'b': 2}
assert (df.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
assert (pd.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
class TestDataFrameQueryWithMultiIndex(object):
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser,
engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser,
engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_raise_on_panel_with_multiindex(self, parser, engine):
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with pytest.raises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas(object):
@classmethod
def setup_class(cls):
cls.engine = 'numexpr'
cls.parser = 'pandas'
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d['dates1'] = date_range('1/1/2012', periods=n)
d['dates3'] = date_range('1/1/2014', periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index.to_series() < '20130101') &
('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame({'dates': date_range('1/1/2012', periods=n),
'nondate': np.arange(n)})
result = df.query('dates == nondate', parser=parser, engine=engine)
assert len(result) == 0
result = df.query('dates != nondate', parser=parser, engine=engine)
assert_frame_equal(result, df)
for op in ['<', '>', '<=', '>=']:
with pytest.raises(TypeError):
df.query('dates %s nondate' % op, parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
"r": lrange(4, 14)})
with pytest.raises(SyntaxError):
df.query('i - +', engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
a, b = 1, 2 # noqa
res = df.query('a > b', engine=engine, parser=parser)
expected = df[df.a > df.b]
assert_frame_equal(res, expected)
res = df.query('@a > b', engine=engine, parser=parser)
expected = df[a > df.b]
assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(UndefinedVariableError):
df.query('@a > b > @c', engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError):
df.query('@a > b > c', engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
with tm.assert_raises_regex(NumExprClobberingError,
'Variables in expression.+'):
df.query('sin > 5', engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
df[df.a < df.b])
assert_frame_equal(df.query('a + b > b * c', engine=engine,
parser=parser),
df[df.a + df.b > df.b * df.c])
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=Index(range(10), name='blob'),
columns=['a', 'b', 'c'])
res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
assert_frame_equal(res, expec)
res = df.query('blob < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=range(10), columns=['a', 'b', 'c'])
# "index" should refer to the index
res = df.query('index < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
# test against a scalar
res = df.query('index < 5', engine=engine, parser=parser)
expec = df[df.index < 5]
assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query('(@df > 0) & (@df2 > 0)', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
assert_frame_equal(result, expected)
result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError):
df.query('df > 0', engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
b = 1
expect = df[df.a < b]
result = df.query('a < @b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query('a < b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list('abc')
df = DataFrame(randn(100, len(cols)), columns=cols)
res = df.query('a < b < c and a not in b not in c', engine=engine,
parser=parser)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) # noqa
expec = df[ind]
assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name='a')
b = Series(np.random.randint(10, size=15), name='b')
df = DataFrame({'a': a, 'b': b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query('b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name='b')
expected = df.loc[(b - 1).isin(a)]
result = df.query('@b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
with tm.assert_raises_regex(UndefinedVariableError,
"local variable 'c' is not defined"):
df.query('a == @c', engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
df.index.name = 'index'
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df[df['index'] > 5]
assert_frame_equal(result, expected)
df = DataFrame({'index': a,
'b': np.random.randn(a.size)})
result = df.query('ilevel_0 > 5', engine=self.engine,
parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
df.index.name = 'a'
result = df.query('a > 5', engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
assert_frame_equal(result, expected)
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
df.loc[::2, 0] = np.inf
ops = '==', '!='
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
q = 'a %s inf' % op
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
assert_frame_equal(result, expected)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryNumExprPython, cls).setup_class()
cls.engine = 'numexpr'
cls.parser = 'python'
cls.frame = TestData().frame
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
with pytest.raises(NotImplementedError):
df.query('index < 20130101 < dates3', engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1 # noqa
result = pd.eval('x + 1', engine=engine, parser=parser)
assert result == 2
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with pytest.raises(SyntaxError):
df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError):
df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
parser=parser)
assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPandas, cls).setup_class()
cls.engine = 'python'
cls.parser = 'pandas'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPython, cls).setup_class()
cls.engine = cls.parser = 'python'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryStrings(object):
def test_str_query_method(self, parser, engine):
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings == 'a']
if parser != 'pandas':
col = 'strings'
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
pytest.raises(NotImplementedError, df.query, ex,
engine=engine, parser=parser,
local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[df.strings.isin(['a'])])
expect = df[df.strings != 'a']
res = df.query('strings != "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[~df.strings.isin(['a'])])
def test_str_list_query_method(self, parser, engine):
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings.isin(['a', 'b'])]
if parser != 'pandas':
col = 'strings'
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
with pytest.raises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
expect = df[~df.strings.isin(['a', 'b'])]
res = df.query('strings != ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
if parser == 'pandas':
res = df.query('a in b', parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
assert_frame_equal(res, expec)
res = df.query('a in b and c < d', parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
assert_frame_equal(res, expec)
else:
with pytest.raises(NotImplementedError):
df.query('a in b', parser=parser, engine=engine)
with pytest.raises(NotImplementedError):
df.query('a in b and c < d', parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
res = df.query('a == b', parser=parser, engine=engine)
exp = df[df.a == df.b]
assert_frame_equal(res, exp)
res = df.query('a != b', parser=parser, engine=engine)
exp = df[df.a != df.b]
assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
skip_if_no_pandas_parser(parser)
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
parse_dates=['timestamp'])
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser,
engine=engine)
assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame({'a': ['a', 'b', 'test & test'],
'b': [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == 'test & test']
assert_frame_equal(res, expec)
def test_query_lex_compare_strings(self, parser, engine):
import operator as opr
a = Series(np.random.choice(list('abcde'), 20))
b = Series(np.arange(a.size))
df = DataFrame({'X': a, 'Y': b})
ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
for op, func in ops.items():
res = df.query('X %s "d"' % op, engine=engine, parser=parser)
expected = df[func(df.X, 'd')]
assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
columns = 'bid', 'bidsize', 'ask', 'asksize'
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query('bid & ask', engine=engine, parser=parser)
expected = df[df.bid & df.ask]
assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
'Price': [109.70, 109.72, 183.30, 183.35]})
e = df[df.Symbol == 'BUD US']
symb = 'BUD US' # noqa
r = df.query('Symbol == @symb', parser=parser, engine=engine)
assert_frame_equal(e, r)
class TestDataFrameEvalWithFrame(object):
def setup_method(self, method):
self.frame = DataFrame(randn(10, 3), columns=list('abc'))
def teardown_method(self, method):
del self.frame
def test_simple_expr(self, parser, engine):
res = self.frame.eval('a + b', engine=engine, parser=parser)
expect = self.frame.a + self.frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self, parser, engine):
res = self.frame.eval('a[a < 1] + b', engine=engine, parser=parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
assert_series_equal(res, expect)
@pytest.mark.parametrize('op', ['+', '-', '*', '/'])
def test_invalid_type_for_operator_raises(self, parser, engine, op):
df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
with tm.assert_raises_regex(TypeError,
r"unsupported operand type\(s\) "
"for .+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=engine, parser=parser)
| bsd-3-clause |
showandtellinar/askjarvis | modules/LocationContext/LocationContext.py | 1 | 4086 | import datetime, folium, random, numpy as np, pandas as pd
from bs4 import BeautifulSoup
def parse_kml(filename):
"""Parses a KML file into a Pandas DataFrame"""
with open(filename) as f:
rows = []
soup = BeautifulSoup(f)
for time, coords in zip(soup.findAll('when'), soup.findAll('gx:coord')):
timestamp = time.string
coords = coords.string.split(' ')[:2]
            longitude = float(coords[0])  # gx:coord stores longitude first
            latitude = float(coords[1])
            rows.append([timestamp, longitude, latitude])
df = pd.DataFrame(rows, columns=['Timestamp', 'Longitude', 'Latitude'])
df['Timestamp'] = pd.to_datetime(df.Timestamp.str.slice(0,23), format='%Y-%m-%dT%H:%M:%S.%f')
return df
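# For reference, an illustrative sketch of the markup parse_kml expects (the element
# names come from the parsing code above; the values below are made up):
#
#   <when>2013-01-01T00:00:00.000Z</when>
#   <gx:coord>-97.123456 30.123456 0</gx:coord>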
def clean_data(df):
"""Only look at data within 75 miles of the median latitude and longitude."""
miles = 75.0
degrees = miles / 69.11
for col in ('Latitude', 'Longitude'):
median = df[col].median()
df = df[(df[col] >= median - degrees) & (df[col] <= median + degrees)]
return df
def get_work_df(df):
"""Get all data between 10AM and 4PM Monday-Friday"""
return df[(df.hour >= 10) & (df.hour <= 16) & (df.day >= 0) & (df.day <= 4)]
def get_home_df(df):
"""Get all data between 11PM and 5AM Monday-Thursday"""
return df[((df.hour >= 23) | (df.hour <= 5)) & (df.day >= 0) & (df.day <= 3)]
def format_for_clustering(df):
"""Format data for the clustering algorithm"""
lng = df.Longitude
lat = df.Latitude
return np.array(zip(lng, lat))
# Clustering algorithm from the internet
# ------------------------------------- #
def cluster_points(X, mu):
clusters = {}
for x in X:
bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) for i in enumerate(mu)], key=lambda t:t[1])[0]
try:
clusters[bestmukey].append(x)
except KeyError:
clusters[bestmukey] = [x]
return clusters
def reevaluate_centers(mu, clusters):
newmu = []
keys = sorted(clusters.keys())
for k in keys:
newmu.append(np.mean(clusters[k], axis = 0))
return newmu
def has_converged(mu, oldmu):
return (set([tuple(a) for a in mu]) == set([tuple(a) for a in oldmu]))
def find_centers(X, K):
# Initialize to K random centers
oldmu = random.sample(X, K)
mu = random.sample(X, K)
while not has_converged(mu, oldmu):
oldmu = mu
# Assign all points in X to clusters
clusters = cluster_points(X, mu)
# Reevaluate centers
mu = reevaluate_centers(oldmu, clusters)
return {'centers': mu, 'datapoints': clusters}
# ------------------------------------- #
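# A minimal usage sketch of the clustering helpers above (illustrative only and
# never called; the sample points are made up):
def _demo_find_centers():
    points = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])
    result = find_centers(points, 2)
    # result['centers'] holds the two cluster centers and
    # result['datapoints'] maps each cluster index to its member points.
    return result['centers']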
def setup():
"""Set up the master DataFrame"""
df = parse_kml('brady_location.kml')
df = clean_data(df)
df['hour'] = df.Timestamp.map(lambda x: x.hour)
df['day'] = df.Timestamp.map(lambda x: x.dayofweek)
return df
def get_location(df, location_func, n):
"""Use clustering to get a location for a certain time period"""
location_df = location_func(df)
location_data = format_for_clustering(location_df)
location_cluster = find_centers(location_data, n)
def f(x):
err1 = abs(x[0] - location_df.Longitude.median())
err2 = abs(x[1] - location_df.Latitude.median())
return err1 + err2
location_result = min(location_cluster['centers'], key=lambda x: f(x))
return location_result
def display(initial_lat, initial_long, locations, map_path):
"""Use folium to display locations"""
fmap = folium.Map(location=[initial_lat, initial_long], zoom_start=13)
for location in locations:
fmap.simple_marker([location[0][1], location[0][0]], popup=location[1])
fmap.create_map(path=map_path)
def main():
"""Main function"""
df = setup()
work_location = get_location(df, get_work_df, 6)
home_location = get_location(df, get_home_df, 6)
locations = [(work_location, 'Work'), (home_location, 'Home')]
    display(df.Latitude.iloc[0], df.Longitude.iloc[0], locations, "map.html")
return locations
main()
| mit |
sangzhe/sangzhe.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts each entry for use with [academicpages.github.io](https://academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run it from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTeX and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
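# For illustration only (the values below are made up), a minimal `publications.tsv` could look like the following, with the fields separated by tab characters:
#
#   pub_date    title           venue      excerpt          citation                                         site_url             paper_url                      url_slug
#   2015-10-01  My Paper Title  Journal 1  A short summary. Your Name. (2015). "My Paper Title." Journal 1.  http://example.com   http://example.com/paper.pdf   my-paper-title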
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them less readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), remove or comment out the corresponding lines.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
mhue/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
abrigham1/abrigham | MovieReviewClassifier/MovieReviewClassifier.py | 1 | 2694 | import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
import joblib
from pprint import pprint
from time import time
import os
from MovieReviewClassifierHelpers import preprocessor, tokenizer
# initialize our TfidfVectorizer
tfidf = TfidfVectorizer(strip_accents=None,
lowercase=False)
# initialize our logistic regression classifier
lr = LogisticRegression(solver='liblinear', random_state=0)
# set up our param grid so we can tune our hyperparameters
# (specifically here we are tuning ngram_range, penalty, and C)
param_grid = {'vect__ngram_range': [(1, 1), (2, 2)],
'vect__preprocessor': [preprocessor],
'vect__stop_words': ['english'],
'vect__tokenizer': [tokenizer],
'clf__penalty': ['l1', 'l2'],
'clf__C': [1.0, 10.0, 100.0]
}
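# With the grid above this amounts to 2 ngram_range values * 2 penalties * 3 values of C
# = 12 candidate pipelines; with cv=3 below, GridSearchCV therefore fits 36 models
# (plus a final refit of the best estimator on the full training set).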
# initialize our pipeline with our vectorizer and our classifier
pipe = Pipeline([('vect', tfidf),
('clf', lr)])
# read our training data into the dataframe
df = pd.read_csv(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'movie_data.csv'))
# get our training data
X_train = df.loc[:25000, 'review'].values
y_train = df.loc[:25000, 'sentiment'].values
X_test = df.loc[25000:, 'review'].values
y_test = df.loc[25000:, 'sentiment'].values
# multiprocessing requires the fork to happen in a __main__ protected block
if __name__ == "__main__":
# set up our grid search to find our best hyperparameters
gridsearch = GridSearchCV(pipe, param_grid,
scoring='accuracy',
cv=3, verbose=1,
n_jobs=-1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipe.steps])
print("parameters:")
pprint(param_grid)
t0 = time()
# train our classifier
gridsearch.fit(X_train, y_train)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %s" % '{:.1%}'.format(gridsearch.best_score_))
print("Best parameters set:")
best_parameters = gridsearch.best_estimator_.get_params()
for param_name in sorted(param_grid.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
# sanity check to see our best estimator also performs well on our test set
score = gridsearch.best_estimator_.score(X_test, y_test)
print('Test Accuracy: %s' % '{:.1%}'.format(score))
# save our best classifier
joblib.dump(gridsearch.best_estimator_, 'lr_pipeline.pkl')
| mit |
WarrenWeckesser/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
"""We use Scott's Rule, multiplied by a constant factor."""
return np.power(obj.n, -1./(obj.d+4)) * fac
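# Worked example (illustrative): the 225 one-dimensional samples generated below give
# a Scott's factor of 225 ** (-1. / 5) ~= 0.34, so fac=0.2 and fac=0.5 correspond to
# bandwidth factors of roughly 0.068 and 0.17.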
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
np.random.normal(loc=loc2, scale=scale2, size=size2)])
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
wateraccounting/wa | Functions/Four/SplitET.py | 1 | 9011 | # -*- coding: utf-8 -*-
"""
Authors: Bert Coerver, Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Function/Four
"""
# import general python modules
import pandas as pd
import numpy as np
import os
def Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate):
"""
    This function splits evapotranspiration into blue and green evapotranspiration.
Parameters
----------
    Dir_Basin : str
        Path to the main basin directory
    nc_outname : str
        Path to the .nc file containing data
    ETref_Product : str
        Name of the reference evapotranspiration product (or path to its data)
    P_Product : str
        Name of the precipitation product (or path to its data)
    Startdate : str
        Start date of the period, 'yyyy-mm-dd'
    Enddate : str
        End date of the period, 'yyyy-mm-dd'
Returns
-------
ET_Blue : array
Array[time, lat, lon] contains Blue Evapotranspiration
ET_Green : array
Array[time, lat, lon] contains Green Evapotranspiration
"""
import wa.General.raster_conversions as RC
import wa.Functions.Start.Get_Dictionaries as GD
# Input Parameters functions
scale = 1.1
# Open LU map for example
LU = RC.Open_nc_array(nc_outname, "Landuse")
# Define monthly dates
Dates = pd.date_range(Startdate, Enddate, freq = 'MS')
# Get moving window period
# Get dictionaries and keys for the moving average
ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
Classes = ET_Blue_Green_Classes_dict.keys()
Moving_Averages_Values_Array = np.ones(LU.shape) * np.nan
# Create array based on the dictionary that gives the Moving average tail for every pixel
for Class in Classes:
Values_Moving_Window_Class = Moving_Window_Per_Class_dict[Class]
for Values_Class in ET_Blue_Green_Classes_dict[Class]:
Moving_Averages_Values_Array[LU == Values_Class] = Values_Moving_Window_Class
Additional_Months_front = int(np.nanmax(Moving_Averages_Values_Array))
Additional_Months_tail = 0
Start_period = Additional_Months_front
End_period = Additional_Months_tail * -1
########################### Extract ETref data #################################
    if ETref_Product == 'WA_ETref':
# Define data path
Data_Path_ETref = os.path.join(Dir_Basin, 'ETref', 'Monthly')
else:
Data_Path_ETref = ETref_Product
ETref = Complete_3D_Array(nc_outname, 'Reference_Evapotranspiration', Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path_ETref)
######################## Extract Precipitation data ########################
if (P_Product == "CHIRPS" or P_Product == "RFE"):
# Define data path
Data_Path_P = os.path.join(Dir_Basin, 'Precipitation', P_Product, 'Monthly')
else:
Data_Path_P = P_Product
P = Complete_3D_Array(nc_outname, 'Precipitation', Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path_P)
########################## Extract actET data ##############################
ET = RC.Open_nc_array(nc_outname, "Actual_Evapotranspiration", Startdate, Enddate)
############ Create average ETref and P using moving window ################
ETref_Ave = np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])]) * np.nan
P_Ave = np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])]) * np.nan
if End_period == 0:
P_period = P[Start_period:,:,:]
ETref_period = ETref[Start_period:,:,:]
else:
P_period = P[Start_period:End_period,:,:]
ETref_period = ETref[Start_period:End_period,:,:]
# Loop over the different moving average tails
for One_Value in np.unique(Moving_Window_Per_Class_dict.values()):
        # If the moving average length is 1, then use the value of the original ETref or P
if One_Value == 1:
Values_Ave_ETref = ETref[int(ETref.shape[0])-len(Dates):,:,:]
Values_Ave_P = P[int(ETref.shape[0])-len(Dates):,:,:]
        # If there is a longer tail, apply the moving average over the whole datacube
else:
Values_Ave_ETref_tot = RC.Moving_average(ETref, One_Value - 1, 0)
Values_Ave_P_tot = RC.Moving_average(P, One_Value - 1, 0)
Values_Ave_ETref = Values_Ave_ETref_tot[int(Values_Ave_ETref_tot.shape[0])-len(Dates):,:,:]
Values_Ave_P = Values_Ave_P_tot[int(Values_Ave_P_tot.shape[0])-len(Dates):,:,:]
# Only add the data where the corresponding tail corresponds with the one_value
ETref_Ave[:,Moving_Averages_Values_Array == One_Value] = Values_Ave_ETref[:,Moving_Averages_Values_Array == One_Value]
P_Ave[:,Moving_Averages_Values_Array == One_Value] = Values_Ave_P[:,Moving_Averages_Values_Array == One_Value]
##################### Calculate ET blue and green ###########################
    # Mask out the nan values (if one of the parameters is nan, then they are all nan)
    mask = np.any([np.isnan(LU) * np.ones([len(Dates), int(LU.shape[0]), int(LU.shape[1])]) == 1,
                   np.isnan(ET),
                   np.isnan(ETref[int(ETref.shape[0]) - len(Dates):, :, :]),
                   np.isnan(P[int(ETref.shape[0]) - len(Dates):, :, :]),
                   np.isnan(P_Ave),
                   np.isnan(ETref_Ave)], axis=0)
ETref_period[mask] = ETref_Ave[mask] = ET[mask] = P_period[mask] = P_Ave[mask] = np.nan
phi = ETref_Ave / P_Ave
# Calculate Budyko-index
Budyko = scale * np.sqrt(phi*np.tanh(1/phi)*(1-np.exp(-phi)))
# Calculate ET green
ETgreen_DataCube = np.minimum(Budyko*P[int(ETref.shape[0])-len(Dates):,:,:],ET)
# Calculate ET blue
ETblue_DataCube = ET - ETgreen_DataCube
return(np.array(ETblue_DataCube), np.array(ETgreen_DataCube))
def Calc_budyko(phi):
"""
    This function calculates the Budyko number based on the aridity index
Parameters
----------
phi : Array
Array[time, lat, lon] containing phi
Returns
-------
Budyko : array
Array[time, lat, lon] containing Budyko number
"""
Budyko = np.sqrt(phi * np.tanh(1/phi) * (1-np.exp(-phi)))
return(Budyko)
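# Worked example (illustrative): Calc_budyko(np.array([0.5, 1.0, 2.0])) returns roughly
# array([0.44, 0.69, 0.89]); the curve approaches 1 as the aridity index phi grows.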
def Complete_3D_Array(nc_outname, Var, Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path):
from netCDF4 import Dataset
import wa.General.raster_conversions as RC
# Define startdate and enddate with moving average
Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_front)
Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(months = Additional_Months_tail)
Startdate_Moving_Average_String = '%d-%02d-%02d' %(Startdate_Moving_Average.year, Startdate_Moving_Average.month, Startdate_Moving_Average.day)
Enddate_Moving_Average_String = '%d-%02d-%02d' %(Enddate_Moving_Average.year, Enddate_Moving_Average.month, Enddate_Moving_Average.day)
# Extract moving average period before
Year_front = int(Startdate_Moving_Average.year)
filename_front = os.path.join(os.path.dirname(nc_outname), "%d.nc" %Year_front)
Enddate_Front = pd.Timestamp(Startdate) - pd.DateOffset(days = 1)
# Extract inside start and enddate
Array_main = RC.Open_nc_array(nc_outname, Var, Startdate, Enddate)
if Additional_Months_front > 0:
# Extract moving average period before
if os.path.exists(filename_front):
# Open variables in netcdf
fh = Dataset(filename_front)
Variables_NC = [var for var in fh.variables]
fh.close()
if Var in Variables_NC:
Array_front = RC.Open_nc_array(filename_front, Var, Startdate_Moving_Average_String, Enddate_Front)
else:
Array_front = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_Moving_Average_String, Enddate_Front, nc_outname)
else:
Array_front = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_Moving_Average_String, Enddate_Front, nc_outname)
# Merge dataset
Array_main = np.vstack([Array_front,Array_main])
if Additional_Months_tail > 0:
# Extract moving average period after
Year_tail = int(Enddate_Moving_Average.year)
filename_tail = os.path.join(os.path.dirname(nc_outname), "%d.nc" %Year_tail)
Startdate_tail = pd.Timestamp(Enddate) + pd.DateOffset(days = 1)
# Extract moving average period after
if os.path.exists(filename_tail):
# Open variables in netcdf
fh = Dataset(filename_tail)
Variables_NC = [var for var in fh.variables]
fh.close()
if Var in Variables_NC:
Array_tail = RC.Open_nc_array(filename_tail, Var, Startdate_tail, Enddate_Moving_Average_String)
else:
Array_tail = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_tail, Enddate_Moving_Average_String, nc_outname)
else:
Array_tail = RC.Get3Darray_time_series_monthly(Data_Path, Startdate_tail, Enddate_Moving_Average_String, nc_outname)
# Merge dataset
Array_main = np.vstack([Array_main,Array_tail])
return(Array_main)
| apache-2.0 |
Mazecreator/tensorflow | tensorflow/contrib/distributions/python/ops/mixture.py | 13 | 18780 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
#### Examples
```python
# Create a mixture of two Gaussians:
ds = tf.contrib.distributions
mix = 0.3
bimix_gauss = ds.Mixture(
cat=ds.Categorical(probs=[mix, 1.-mix]),
components=[
ds.Normal(loc=-1., scale=0.1),
ds.Normal(loc=+1., scale=0.5),
])
# Plot the PDF.
import matplotlib.pyplot as plt
x = tf.linspace(-2., 3., int(1e4)).eval()
plt.plot(x, bimix_gauss.prob(x).eval());
```
"""
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
        of `components`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = locals()
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]):
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
# We let the Mixture distribution access _graph_parents since its arguably
# more like a baseclass.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(self.event_shape.ndims):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
def _stddev(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
distribution_devs = [d.stddev() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
stacked_means = array_ops.stack(distribution_means, axis=-1)
stacked_devs = array_ops.stack(distribution_devs, axis=-1)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
array_ops.ones_like(stacked_means))
batched_dev = distribution_utils.mixture_stddev(
array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
array_ops.reshape(stacked_means, [-1, len(self.components)]),
array_ops.reshape(stacked_devs, [-1, len(self.components)]))
# I.e. re-shape to list(batch_shape) + list(event_shape).
return array_ops.reshape(batched_dev,
array_ops.shape(broadcasted_cat_probs)[:-1])
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _log_cdf(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_cdfs = [
cat_lp + d_lcdf
for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
]
concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
return mixture_log_cdf
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# batch indices are i.e., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
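  # Illustrative note (not part of the original module): with the example from the
  # class docstring (cat probabilities [0.3, 0.7], components Normal(-1., 0.1) and
  # Normal(+1., 0.5)), entropy_lower_bound() evaluates
  # 0.3 * H[Normal(-1., 0.1)] + 0.7 * H[Normal(+1., 0.5)].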
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
| apache-2.0 |
M4573R/playground-notes | scalable-machine-learning/lab-0/ML_lab0_student.py | 8 | 5711 |
# coding: utf-8
# #![Spark Logo](http://spark-mooc.github.io/web-assets/images/ta_Spark-logo-small.png) + ![Python Logo](http://spark-mooc.github.io/web-assets/images/python-logo-master-v3-TM-flattened_small.png)
# # **First Notebook: Virtual machine test and assignment submission**
# #### This notebook will test that the virtual machine (VM) is functioning properly and will show you how to submit an assignment to the autograder. To move through the notebook just run each of the cells. You will not need to solve any problems to complete this lab. You can run a cell by pressing "shift-enter", which will compute the current cell and advance to the next cell, or by clicking in a cell and pressing "control-enter", which will compute the current cell and remain in that cell. At the end of the notebook you will export / download the notebook and submit it to the autograder.
# #### ** This notebook covers: **
# #### *Part 1:* Test Spark functionality
# #### *Part 2:* Check class testing library
# #### *Part 3:* Check plotting
# #### *Part 4:* Check MathJax formulas
# #### *Part 5:* Export / download and submit
# ### ** Part 1: Test Spark functionality **
# #### ** (1a) Parallelize, filter, and reduce **
# In[1]:
# Check that Spark is working
largeRange = sc.parallelize(xrange(100000))
reduceTest = largeRange.reduce(lambda a, b: a + b)
filterReduceTest = largeRange.filter(lambda x: x % 7 == 0).sum()
print reduceTest
print filterReduceTest
# If the Spark jobs don't work properly these will raise an AssertionError
assert reduceTest == 4999950000
assert filterReduceTest == 714264285
# #### ** (1b) Loading a text file **
# In[2]:
# Check loading data with sc.textFile
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt')
fileName = os.path.join(baseDir, inputPath)
rawData = sc.textFile(fileName)
shakespeareCount = rawData.count()
print shakespeareCount
# If the text file didn't load properly an AssertionError will be raised
assert shakespeareCount == 122395
# ### ** Part 2: Check class testing library **
# #### ** (2a) Compare with hash **
# In[3]:
# TEST Compare with hash (2a)
# Check our testing library/package
# This should print '1 test passed.' on two lines
from test_helper import Test
twelve = 12
Test.assertEquals(twelve, 12, 'twelve should equal 12')
Test.assertEqualsHashed(twelve, '7b52009b64fd0a2a49e6d8a939753077792b0554',
'twelve, once hashed, should equal the hashed value of 12')
# #### ** (2b) Compare lists **
# In[4]:
# TEST Compare lists (2b)
# This should print '1 test passed.'
unsortedList = [(5, 'b'), (5, 'a'), (4, 'c'), (3, 'a')]
Test.assertEquals(sorted(unsortedList), [(3, 'a'), (4, 'c'), (5, 'a'), (5, 'b')],
'unsortedList does not sort properly')
# ### ** Part 3: Check plotting **
# #### ** (3a) Our first plot **
# #### After executing the code cell below, you should see a plot with 50 blue circles. The circles should start at the bottom left and end at the top right.
# In[5]:
# Check matplotlib plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from math import log
# function for generating plot layout
def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999', gridWidth=1.0):
plt.close()
fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
ax.axes.tick_params(labelcolor='#999999', labelsize='10')
for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
axis.set_ticks_position('none')
axis.set_ticks(ticks)
axis.label.set_color('#999999')
if hideLabels: axis.set_ticklabels([])
plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
return fig, ax
# generate layout and plot data
x = range(1, 50)
y = [log(x1 ** 2) for x1 in x]
fig, ax = preparePlot(range(5, 60, 10), range(0, 12, 1))
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
ax.set_xlabel(r'$range(1, 50)$'), ax.set_ylabel(r'$\log_e(x^2)$')
pass
# ### ** Part 4: Check MathJax Formulas **
# #### ** (4a) Gradient descent formula **
# #### You should see a formula on the line below this one: $$ \scriptsize \mathbf{w}_{i+1} = \mathbf{w}_i - \alpha_i \sum_j (\mathbf{w}_i^\top\mathbf{x}_j - y_j) \mathbf{x}_j \,.$$
#
# #### This formula is included inline with the text and is $ \scriptsize (\mathbf{w}^\top \mathbf{x} - y) \mathbf{x} $.
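# In[ ]:
# Illustrative only (not part of the original lab): a single gradient descent update
# using the summand above, with made-up values for the weights and one observation.
import numpy as np
def gradient_summand(weights, x, y):
    """Computes (w^T x - y) * x for one observation."""
    return (weights.dot(x) - y) * x
w = np.array([1.0, 1.0])
x_j = np.array([1.0, 2.0])
y_j = 2.0
alpha = 0.1
w_next = w - alpha * gradient_summand(w, x_j, y_j)  # gives array([0.9, 0.8])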
# #### ** (4b) Log loss formula **
# #### This formula shows the log loss for a single point. Log loss is defined as: $$ \begin{align} \scriptsize \ell_{log}(p, y) = \begin{cases} -\log (p) & \text{if } y = 1 \\\ -\log(1-p) & \text{if } y = 0 \end{cases} \end{align} $$
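# In[ ]:
# Illustrative only (not part of the original lab): log loss for a single point,
# following the definition above. For p = 0.9 the loss is -log(0.9) ~= 0.105 when
# y = 1, and -log(1 - 0.9) ~= 2.303 when y = 0.
from math import log
def log_loss(p, y):
    return -log(p) if y == 1 else -log(1.0 - p)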
# ### ** Part 5: Export / download and submit **
# #### ** (5a) Time to submit **
# #### You have completed the lab. To submit the lab for grading you will need to download it from your IPython Notebook environment. You can do this by clicking on "File", then hovering your mouse over "Download as", and then clicking on "Python (.py)". This will export your IPython Notebook as a .py file to your computer.
# #### To upload this file to the course autograder, go to the edX website and find the page for submitting this assignment. Click "Choose file", then navigate to and click on the downloaded .py file. Now click the "Open" button and then the "Check" button. Your submission will be graded shortly and will be available on the page where you submitted. Note that when submission volumes are high, it may take as long as an hour to receive results.
| mit |
B3AU/waveTree | sklearn/neighbors/tests/test_kd_tree.py | 4 | 7938 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
"""Compare gaussian KDE results to scipy.stats.gaussian_kde"""
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
# older versions of scipy don't accept explicit bandwidth
raise SkipTest
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
ddbourgin/bookworm_api | bookworm/general_API.py | 1 | 14654 | #!/usr/bin/python
import MySQLdb
from pandas import merge
from pandas.io.sql import read_sql
from pandas import set_option
from SQLAPI import *
from copy import deepcopy
from collections import defaultdict
import ConfigParser
import os.path
import re
import json
#Some settings can be overridden here, if no where else.
prefs = dict()
def find_my_cnf():
"""
The password will be looked for in these places.
"""
for file in ["etc/bookworm/my.cnf","/etc/my.cnf","/etc/mysql/my.cnf","/root/.my.cnf"]:
if os.path.exists(file):
return file
class dbConnect(object):
#This is a read-only account
def __init__(self,prefs=prefs,database="federalist",host="localhost"):
self.dbname = database
#For back-compatibility:
if "HOST" in prefs:
host=prefs['HOST']
self.db = MySQLdb.connect(host=host,
db=database,
read_default_file = find_my_cnf(),
use_unicode='True',
charset='utf8')
self.cursor = self.db.cursor()
def calculateAggregates(df,parameters):
"""
We only collect "WordCoun" and "TextCount" for each query,
but there are a lot of cool things you can do with those:
basic things like frequency, all the way up to TF-IDF.
"""
parameters = set(parameters)
if "WordsPerMillion" in parameters:
df["WordsPerMillion"] = df["WordCount_x"].multiply(1000000)/df["WordCount_y"]
if "WordCount" in parameters:
df["WordCount"] = df["WordCount_x"]
if "TotalWords" in parameters:
df["TotalWords"] = df["WordCount_y"]
if "SumWords" in parameters:
df["SumWords"] = df["WordCount_y"] + df["WordCount_x"]
if "WordsRatio" in parameters:
df["WordsRatio"] = df["WordCount_x"]/df["WordCount_y"]
if "TextPercent" in parameters:
df["TextPercent"] = 100*df["TextCount_x"].divide(df["TextCount_y"])
if "TextCount" in parameters:
df["TextCount"] = df["TextCount_x"]
if "TotalTexts" in parameters:
df["TotalTexts"] = df["TextCount_y"]
if "HitsPerBook" in parameters:
df["HitsPerMatch"] = df["WordCount_x"]/df["TextCount_x"]
if "TextLength" in parameters:
df["HitsPerMatch"] = df["WordCount_y"]/df["TextCount_y"]
if "TFIDF" in parameters:
from numpy import log as log
df.eval("TF = WordCount_x/WordCount_y")
df["TFIDF"] = (df["WordCount_x"]/df["WordCount_y"])*log(df["TextCount_y"]/df['TextCount_x'])
def DunningLog(df=df,a = "WordCount_x",b = "WordCount_y"):
from numpy import log as log
destination = "Dunning"
df[a] = df[a].replace(0,1)
df[b] = df[b].replace(0,1)
if a=="WordCount_x":
# Dunning comparisons should be to the sums if counting:
c = sum(df[a])
d = sum(df[b])
if a=="TextCount_x":
# The max count isn't necessarily the total number of books, but it's a decent proxy.
c = max(df[a])
d = max(df[b])
expectedRate = (df[a] + df[b]).divide(c+d)
E1 = c*expectedRate
E2 = d*expectedRate
diff1 = log(df[a].divide(E1))
diff2 = log(df[b].divide(E2))
df[destination] = 2*(df[a].multiply(diff1) + df[b].multiply(diff2))
        # A hack, but a useful one: encode the direction of the significance
        # in the sign, so a negative Dunning score means the term is
        # relatively more common in the comparison (_y) counts.
difference = diff1<diff2
df.ix[difference,destination] = -1*df.ix[difference,destination]
return df[destination]
if "Dunning" in parameters:
df["Dunning"] = DunningLog(df,"WordCount_x","WordCount_y")
if "DunningTexts" in parameters:
df["DunningTexts"] = DunningLog(df,"TextCount_x","TextCount_y")
return df
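# Illustrative sketch (not part of the original module): how calculateAggregates
# is applied to the merged frame built in APIcall.get_data_from_source. The
# _x/_y suffixes are the ones pandas adds when the local query is merged
# against the comparison ("super") query.
def _example_calculate_aggregates():
    from pandas import DataFrame
    merged = DataFrame({"WordCount_x": [12, 3], "WordCount_y": [1000, 1500],
                        "TextCount_x": [4, 1], "TextCount_y": [20, 25]})
    return calculateAggregates(merged, ["WordsPerMillion", "TextPercent"])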
def intersectingNames(p1,p2,full=False):
"""
The list of intersection column names between two DataFrame objects.
'full' lets you specify that you want to include the count values:
Otherwise, they're kept separate for convenience in merges.
"""
exclude = set(['WordCount','TextCount'])
names1 = set([column for column in p1.columns if column not in exclude])
names2 = [column for column in p2.columns if column not in exclude]
if full:
return list(names1.union(names2))
return list(names1.intersection(names2))
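# For example, if p1 and p2 share only a "year" column besides the count
# columns, intersectingNames(p1, p2) returns ["year"]; WordCount and TextCount
# are excluded so the later merge keeps them apart as _x/_y pairs.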
def base_count_types(list_of_final_count_types):
"""
the final count types are calculated from some base types across both
the local query and the superquery.
"""
output = set()
for count_name in list_of_final_count_types:
if count_name in ["WordCount","WordsPerMillion","WordsRatio","TotalWords","SumWords","Dunning"]:
output.add("WordCount")
if count_name in ["TextCount","TextPercent","TextRatio","TotalTexts","SumTexts","DunningTexts"]:
output.add("TextCount")
if count_name in ["TextLength","HitsPerMatch","TFIDF"]:
output.add("TextCount")
output.add("WordCount")
return list(output)
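# For example, base_count_types(["WordsPerMillion", "TFIDF"]) returns
# ["WordCount", "TextCount"] (in arbitrary order, since a set is used).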
def is_a_wordcount_field(string):
if string in ["unigram","bigram","trigram","four_gram","five_gram","six_gram","word"]:
return True
return False
class APIcall(object):
"""
This is the base class from which more specific classes for actual
methods can be dispatched.
Without a "return_pandas_frame" method, it won't run.
"""
def __init__(self,APIcall):
"""
        Initialized with a dictionary unJSONed from the API definition.
"""
self.query = APIcall
self.idiot_proof_arrays()
self.set_defaults()
def set_defaults(self):
query = self.query
if not "search_limits" in query:
self.query["search_limits"] = dict()
if "unigram" in query["search_limits"]:
#Hack: change somehow. You can't group on "word", just on "unigram"
query["search_limits"]["word"] = query["search_limits"]["unigram"]
del query["search_limits"]["unigram"]
def idiot_proof_arrays(self):
for element in ['counttype','groups']:
try:
if not isinstance(self.query[element],list):
self.query[element] = [self.query[element]]
except KeyError:
#It's OK if it's not there.
pass
def get_compare_limits(self):
"""
        The compare limits are chosen in order of preference:
        use "compare_limits" if it is given explicitly;
        failing that, drop every term that begins with an asterisk;
        failing that, drop the word/ngram terms;
        failing that, use exactly the same limits as the search limits.
"""
if "compare_limits" in self.query:
return self.query['compare_limits']
search_limits = self.query['search_limits']
compare_limits = deepcopy(search_limits)
asterisked = False
for limit in search_limits.keys():
if re.search(r'^\*',limit):
search_limits[limit.replace('*','')] = search_limits[limit]
del search_limits[limit]
del compare_limits[limit]
asterisked = True
if asterisked:
return compare_limits
#Next, try deleting the word term.
for word_term in search_limits.keys():
if word_term in ['word','unigram','bigram', 'trigram', 'four_gram', 'five_gram', 'six_gram']:
del compare_limits[word_term]
#Finally, whether it's deleted a word term or not, return it all.
return compare_limits
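    # Illustrative example (not in the original source): with
    # search_limits = {"*author": "Dickens", "word": ["whale"]}, the starred
    # key is unstarred in the search limits and dropped from the compare
    # limits, so the comparison counts span all authors.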
def data(self):
if hasattr(self,"pandas_frame"):
return self.pandas_frame
else:
self.pandas_frame = self.get_data_from_source()
return self.pandas_frame
def get_data_from_source(self):
"""
        Run the query and its comparison query, merge the two result sets,
        and compute the requested count types on the merged frame.
Note that this method could be easily adapted to run on top of a Solr instance or
something else, just by changing the bits in the middle where it handles storage_format.
"""
call1 = deepcopy(self.query)
#The individual calls need only the base counts: not "Percentage of Words," but just "WordCount" twice, and so forth
call1['counttype'] = base_count_types(call1['counttype'])
call2 = deepcopy(call1)
call2['search_limits'] = self.get_compare_limits()
#Drop out asterisks for that syntactic sugar.
for limit in call1['search_limits'].keys():
if re.search(r'^\*',limit):
call1['search_limits'][limit.replace('*','')] = call1['search_limits'][limit]
del call1['search_limits'][limit]
for n,group in enumerate(self.query['groups']):
if re.search(r'^\*',group):
replacement = group.replace("*","")
call1['groups'][n] = replacement
self.query['groups'][n] = replacement
call2['groups'].remove(group)
#Special case: unigram groupings are dropped if they're not explicitly limited
#if "unigram" not in call2['search_limits']:
# call2['groups'] = filter(lambda x: not x in ["unigram","bigram","word"],call2['groups'])
"""
This could use any method other than pandas_SQL:
You'd just need to name objects df1 and df2 as pandas dataframes
"""
df1 = self.generate_pandas_frame(call1)
df2 = self.generate_pandas_frame(call2)
intersections = intersectingNames(df1,df2)
fullLabels = intersectingNames(df1,df2,full=True)
"""
Would this merge be faster with indexes?
"""
if len(intersections) > 0:
merged = merge(df1,df2,on=intersections,how='outer')
else:
"""
Pandas doesn't seem to have a full, unkeyed merge, so I simulate it with a dummy.
"""
df1['dummy_merge_variable'] = 1
df2['dummy_merge_variable'] = 1
merged = merge(df1,df2,on=["dummy_merge_variable"],how='outer')
merged = merged.fillna(int(0))
calculations = self.query['counttype']
calcced = calculateAggregates(merged,calculations)
calcced = calcced.fillna(int(0))
final_DataFrame = calcced[self.query['groups'] + self.query['counttype']]
return final_DataFrame
def execute(self):
method = self.query['method']
if isinstance(self.query['search_limits'],list):
if self.query['method'] not in ["json","return_json"]:
self.query['search_limits'] = self.query['search_limits'][0]
else:
return self.multi_execute()
if method=="return_json" or method=="json":
frame = self.data()
return self.return_json()
if method=="return_tsv" or method=="tsv":
import csv
frame = self.data()
return frame.to_csv(sep="\t",encoding="utf8",index=False,quoting=csv.QUOTE_NONE,escapechar="\\")
if method=="return_pickle" or method=="DataFrame":
frame = self.data()
from cPickle import dumps as pickleDumps
return pickleDumps(frame,protocol=-1)
# Temporary catch-all pushes to the old methods:
if method in ["returnPossibleFields","search_results","return_books"]:
query = userquery(self.query)
if method=="return_books":
return query.execute()
return json.dumps(query.execute())
def multi_execute(self):
"""
Queries may define several search limits in an array
if they use the return_json method.
"""
returnable = []
for limits in self.query['search_limits']:
child = deepcopy(self.query)
child['search_limits'] = limits
returnable.append(self.__class__(child).return_json(raw_python_object=True))
return json.dumps(returnable)
def return_json(self,raw_python_object=False):
query = self.query
data = self.data()
def fixNumpyType(input):
#This is, weirdly, an occasional problem but not a constant one.
if str(input.dtype)=="int64":
return int(input)
else:
return input
#Define a recursive structure to hold the stuff.
def tree():
return defaultdict(tree)
returnt = tree()
import numpy as np
for row in data.itertuples(index=False):
row = list(row)
destination = returnt
if len(row)==len(query['counttype']):
returnt = [fixNumpyType(num) for num in row]
while len(row) > len(query['counttype']):
key = row.pop(0)
if len(row) == len(query['counttype']):
# Assign the elements.
destination[key] = row
break
# This bit of the loop is where we descend the recursive dictionary.
destination = destination[key]
if raw_python_object:
return returnt
try:
return json.dumps(returnt,allow_nan=False)
except ValueError:
            # allow_nan=False rejects NaN/Infinity values (e.g. from division
            # by a zero count); fall back to swapping "Infinity" for null in
            # the serialized string.
            kludge = json.dumps(returnt)
            kludge = kludge.replace("Infinity","null")
            return kludge
class SQLAPIcall(APIcall):
"""
To make a new backend for the API, you just need to extend the base API call
class like this.
This one is comically short because all the real work is done in the userquery object.
But the point is, you need to define a function "generate_pandas_frame"
that accepts an API call and returns a pandas frame.
But that API call is more limited than the general API; you only need to support "WordCount" and "TextCount"
methods.
"""
def generate_pandas_frame(self,call):
"""
This is good example of the query that actually fetches the results.
It creates some SQL, runs it, and returns it as a pandas DataFrame.
The actual SQL production is handled by the userquery class, which uses more
legacy code.
"""
con=dbConnect(prefs,self.query['database'])
q = userquery(call).query()
if self.query['method']=="debug":
print q
df = read_sql(q, con.db)
return df
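# Illustrative sketch only (not part of the original code base): a minimal
# alternative backend of the kind the SQLAPIcall docstring describes. The CSV
# path and column layout are hypothetical; the only real requirement is that
# generate_pandas_frame accept an API call and return a pandas DataFrame with
# the grouping columns plus base "WordCount"/"TextCount" counts.
class CSVAPIcall(APIcall):
    def generate_pandas_frame(self, call):
        from pandas import read_csv
        counts = read_csv("precomputed_counts.csv")  # hypothetical data file
        groups = call.get("groups", [])
        if groups:
            counts = counts.groupby(groups, as_index=False)[
                ["WordCount", "TextCount"]].sum()
        return counts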
| mit |
Kleptobismol/scikit-bio | skbio/stats/ordination/__init__.py | 1 | 3884 | r"""
Ordination methods (:mod:`skbio.stats.ordination`)
==================================================
.. currentmodule:: skbio.stats.ordination
This module contains several ordination methods, including Principal
Coordinate Analysis, Correspondence Analysis, Redundancy Analysis and
Canonical Correspondence Analysis.
Classes
-------
.. autosummary::
:toctree: generated/
PCoA
CA
RDA
CCA
OrdinationResults
Functions
---------
.. autosummary::
:toctree: generated/
mean_and_std
corr
scale
svd_rank
Testing Utilities
-----------------
.. autosummary::
:toctree: generated/
assert_ordination_results_equal
Examples
--------
This is an artificial dataset (table 11.3 in [1]_) that represents fish
abundance in different sites (`Y`, the response variables) and
environmental variables (`X`, the explanatory variables).
>>> import numpy as np
>>> X = np.array([[1.0, 0.0, 1.0, 0.0],
... [2.0, 0.0, 1.0, 0.0],
... [3.0, 0.0, 1.0, 0.0],
... [4.0, 0.0, 0.0, 1.0],
... [5.0, 1.0, 0.0, 0.0],
... [6.0, 0.0, 0.0, 1.0],
... [7.0, 1.0, 0.0, 0.0],
... [8.0, 0.0, 0.0, 1.0],
... [9.0, 1.0, 0.0, 0.0],
... [10.0, 0.0, 0.0, 1.0]])
>>> Y = np.array([[1, 0, 0, 0, 0, 0, 2, 4, 4],
... [0, 0, 0, 0, 0, 0, 5, 6, 1],
... [0, 1, 0, 0, 0, 0, 0, 2, 3],
... [11, 4, 0, 0, 8, 1, 6, 2, 0],
... [11, 5, 17, 7, 0, 0, 6, 6, 2],
... [9, 6, 0, 0, 6, 2, 10, 1, 4],
... [9, 7, 13, 10, 0, 0, 4, 5, 4],
... [7, 8, 0, 0, 4, 3, 6, 6, 4],
... [7, 9, 10, 13, 0, 0, 6, 2, 0],
... [5, 10, 0, 0, 2, 4, 0, 1, 3]])
We can now create a CCA object to perform canonical correspondence
analysis. Matrix `X` contains a continuous variable (depth) and a
categorical one (substrate type) encoded using a one-hot encoding. We
explicitly need to avoid perfect collinearity, so we'll drop one of
the substrate types (the last column of `X`). We also expect to
increase pandas integration to ease analyses.
>>> from skbio.stats.ordination import CCA
>>> ordination_result = CCA(Y, X[:, :-1],
... ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
... 'Site5', 'Site6', 'Site7', 'Site8', 'Site9'],
... ['Species0', 'Species1', 'Species2', 'Species3',
... 'Species4', 'Species5', 'Species6', 'Species7',
... 'Species8'])
Exploring the results we see that the first three axes explain about
80% of all the variance.
>>> sc_2 = ordination_result.scores(scaling=2)
>>> print sc_2.proportion_explained
[ 0.46691091 0.23832652 0.10054837 0.10493671 0.04480535 0.02974698
0.01263112 0.00156168 0.00053235]
References
----------
.. [1] Legendre P. and Legendre L. 1998. Numerical Ecology. Elsevier,
Amsterdam.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from ._correspondence_analysis import CA
from ._redundancy_analysis import RDA
from ._canonical_correspondence_analysis import CCA
from ._principal_coordinate_analysis import PCoA
from ._base import OrdinationResults
from ._utils import (mean_and_std, scale, svd_rank, corr,
assert_ordination_results_equal)
__all__ = ['CA', 'RDA', 'CCA', 'PCoA', 'OrdinationResults', 'mean_and_std',
'scale', 'svd_rank', 'corr', 'assert_ordination_results_equal']
from numpy.testing import Tester
test = Tester().test
| bsd-3-clause |
mrtommyb/GP_model_Kepler_data | code/chain_samples.py | 2 | 6033 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## turn koi2133 chains into data model
from __future__ import division, print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import george
from george.kernels import ExpSquaredKernel, RBFKernel, ExpKernel
from ktransit import LCModel, FitTransit
from scipy import optimize
import h5py
def get_sample(chain_vals,time,flux,ferr,rvtime):
M = LCModel()
M.add_star(rho=chain_vals[0],zpt=chain_vals[1],ld1=chain_vals[2],
ld2=chain_vals[3],veloffset=chain_vals[4])
M.add_planet(T0=chain_vals[7],period=chain_vals[8],impact=chain_vals[9],
rprs=chain_vals[10],ecosw=chain_vals[11],esinw=chain_vals[12],
rvamp=chain_vals[13],occ=chain_vals[14],ell=chain_vals[15],alb=chain_vals[16])
M.add_data(time=time)
M.add_rv(rvtime=rvtime)
#kernel = ((chain_vals[5]**2 * RBFKernel(chain_vals[6])) +
# (chain_vals[7]**2 * RBFKernel(chain_vals[8])))
#kernel = ((chain_vals[5]**2 * ExpKernel(chain_vals[6])) +
# (chain_vals[7]**2 * RBFKernel(chain_vals[8])))
kernel = chain_vals[5]**2 * RBFKernel(chain_vals[6])
gp = george.GaussianProcess(kernel)
sample = np.array([])
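    # Predict the GP noise model in 1000-point sections so each covariance
    # factorization stays small, rather than conditioning on the full light curve.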
for i in np.arange(len(time) // 1000):
section = np.arange(i*1000,i*1000 + 1000)
gp.compute(time[section], ferr[:][section])
sample = np.r_[sample,gp.predict(
flux[:][section] - M.transitmodel[section],time[section])[0]]
return sample, M.transitmodel
def get_many_samples(chain_vals,time,flux,ferr,rvtime,nsamples=300):
M = LCModel()
M.add_star(rho=chain_vals[0],zpt=chain_vals[1],ld1=chain_vals[2],
ld2=chain_vals[3],veloffset=chain_vals[4])
M.add_planet(T0=chain_vals[7],period=chain_vals[8],impact=chain_vals[9],
rprs=chain_vals[10],ecosw=chain_vals[11],esinw=chain_vals[12],
rvamp=chain_vals[13],occ=chain_vals[14],ell=chain_vals[15],alb=chain_vals[16])
M.add_data(time=time)
M.add_rv(rvtime=rvtime)
#kernel = ((chain_vals[5]**2 * RBFKernel(chain_vals[6])) +
# (chain_vals[7]**2 * RBFKernel(chain_vals[8])))
#kernel = ((chain_vals[5]**2 * ExpKernel(chain_vals[6])) +
# (chain_vals[7]**2 * RBFKernel(chain_vals[8])))
kernel = chain_vals[5]**2 * RBFKernel(chain_vals[6])
gp = george.GaussianProcess(kernel)
slist = np.arange(len(time) // 1000)
samples = np.zeros([nsamples,len(slist)*1000])
for i in slist:
section = np.arange(i*1000,i*1000 + 1000)
gp.compute(time[section], ferr[:][section])
samples[:,section] = gp.sample_conditional(
flux[:][section] - M.transitmodel[section],time[section],
size=nsamples)
return samples, M.transitmodel
def get_rv():
pass
if __name__ == '__main__':
fn = 'koi2133_np1_priorTrue_dil0.0GP.hdf5'
f = h5py.File(fn)
g = f['mcmc']['chain'][:]
lnprob = f['mcmc']['lnprob'][:]
mle_idx = np.unravel_index(lnprob.argmax(),
lnprob.shape)
mle = g[mle_idx]
time = f['time'][:]
flux = f['flux'][:]
ferr = f['err'][:]
rvtime = f['rvtime']
doplot = True
plot_many = False
if plot_many and doplot:
samples, tmod = get_many_samples(mle,time,flux,ferr,rvtime,
nsamples=300)
time = f['time'][:61000]
flux = f['flux'][:61000]
tmod1 = tmod[:61000]
fig, (ax1) = plt.subplots(1, 1, sharex=True, sharey=False,
figsize=[9,6])
ax1.scatter(time,flux,s=3,label='Kepler data')
med = np.median(samples+tmod1,axis=0)
stdm = np.std(samples+tmod1,axis=0)
ax1.plot(time,med,color='purple',
label='Light curve + noise model',lw=1,
alpha=0.6)
ax1.fill_between(time,med-stdm,med+stdm,
alpha=0.3,color='purple')
ax1.set_xlim([583.8,596.9])
ax1.legend()
ax1.set_xlabel('Time (BJD-2454833)',labelpad=12)
ax1.set_ylabel('Relative flux')
ax1.minorticks_on()
elif not plot_many and doplot:
sample, tmod = get_sample(mle,time,flux,ferr,rvtime)
time = f['time'][:61000]
flux = f['flux'][:61000]
tmod1 = tmod[:61000]
fig, (ax1) = plt.subplots(1, 1, sharex=True, sharey=False,
figsize=[9,6])
ax1.scatter(time,flux,s=1,label='Kepler data')
ax1.plot(time,sample,color='b',label='Noise model')
ax1.plot(time,tmod1,color='r',
label='Light curve model')
ax1.plot(time,sample+tmod1,color='purple',
label='Light curve + noise model',lw=2)
ax1.set_xlim([583.8,596.9])
ax1.legend()
ax1.set_xlabel('Time (BJD-2454833)',labelpad=12)
ax1.set_ylabel('Relative flux')
ax1.minorticks_on()
many_plots = True
if many_plots:
period = mle[8]
T0 = mle[7]
fig, (axes) = plt.subplots(5, 2, sharex=False, sharey=True,
figsize=[7,14])
axes = axes.flatten()
for i,offset in enumerate(T0 + period*np.arange(67,77,1)):
if i == 2:
offset = T0 + period*77
elif i == 7:
offset = T0 + period*78
win = 1.3
trange = (time < offset+win) & (time > offset-win)
axes[i].scatter(time[trange],flux[trange]
,s=1,label='Kepler data')
axes[i].plot(time[trange],sample[trange],
color='b',label='Noise model')
axes[i].plot(time[trange],tmod1[trange],
color='r',lw=1.5,
label='Light curve model')
axes[i].plot(time[trange],sample[trange]+tmod1[trange],
color='purple',
label='Light curve + noise model',lw=2)
axes[i].set_xlim([offset-win,offset+win])
axes[i].tick_params(labelbottom=False,labelleft=False)
axes[i].minorticks_on()
axes[i].set_ylim([-0.0017,0.0012])
plt.subplots_adjust(wspace=0, hspace=0)
| mit |
eggie5/ipython-notebooks | logistic/sgd.py | 2 | 3280 | from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
import softmax as sm
from sklearn import preprocessing
class GradientDescent(object):
"""GradientDescent"""
def __init__(self):
super(GradientDescent, self).__init__()
self.W = np.array([]) #matrix of weights
        self.B = 0 #bias
self.classes = []
def get_params(self, deep=True):
return {}
def logit(self, X):
scores = X.dot(self.W.T) + self.B
return scores#.ravel() if scores.shape[1] == 1 else scores
def softmax(self, scores):
return sm.Softmax("").softmax_T(scores)
def predict_proba(self, X):
#get logit scores
scores = self.logit(X)
#then feed to softmax to get probabilties
probs = self.softmax(scores)
return probs
def predict_log_proba(self, X):
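        # small offset keeps the log finite when a predicted probability is 0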
off = 1e-6
return -np.log(self.predict_proba(X)+off)
def loss_function(self, x, y_true):
#log-loss/cross-entropy
y_log_prob = self.predict_log_proba(x)
lb = preprocessing.LabelBinarizer().fit(self.classes)
transformed_labels = lb.transform(y_true)
loss = (transformed_labels * y_log_prob)#.sum(axis=1)
return loss#np.average(loss)
def fit(self, X, Y):
nb_epochs = 500
params = []
learning_rate = .1
thresh = .001
epsilon = .2 #stop if error is below this
self.X = X
self.Y = Y
#get num of classes
self.classes = np.unique(Y)
#random init weights
self.W = 0.01 * np.random.randn(len(self.classes),X.shape[1]) #4x3
self.B = np.zeros((1,len(self.classes)))
L = lb = preprocessing.LabelBinarizer().fit(self.classes).transform(self.Y)
for i in range(nb_epochs):
loss = self.loss_function(X, Y) # function of W
error = np.average(loss.sum(axis=1))
# if i % 10 == 0:
# print "iteration %d: loss %f" % (i, error)
#terminate if error is below theshold -- should also check error delta theshold
if error <= epsilon:
# print "Terminating @ iteration %d: loss %f" % (i, error)
return
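            # Gradient ascent on the multinomial log-likelihood:
            # dL/dW = (one-hot labels - predicted probabilities)^T X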
gradient = (L - self.predict_proba(self.X)).T.dot(self.X)
vector = gradient
self.W += learning_rate * vector
return self
def predict(self, X):
#get logit scores
scores = self.logit(X)
indices = scores.argmax(axis=1)
return self.classes[indices]
def score(self, X_test, Y_test):
preds = self.predict(X_test)
score = np.average(preds == Y_test)
return score
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets
iris = datasets.load_iris()
print "My Implementation:"
gd=GradientDescent()
scores = cross_val_score(gd, iris.data, iris.target, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print "\nSGDClassifier: "
clf = SGDClassifier(loss="log", penalty="l2")
scores = cross_val_score(clf, iris.data, iris.target, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print "\nLogisticRegression: "
clf = LogisticRegression()
scores = cross_val_score(clf, iris.data, iris.target, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
| mit |
arokem/nipy | examples/labs/histogram_fits.py | 4 | 3236 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Example of a script that performs histogram analysis of an activation
image, to estimate activation Z-score with various heuristics:
* Gamma-Gaussian model
* Gaussian mixture model
* Empirical normal null
This example is based on a (simplistic) simulated image.
Needs matplotlib
"""
# Author : Bertrand Thirion, Gael Varoquaux 2008-2009
print(__doc__)
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
import nipy.labs.utils.simul_multisubject_fmri_dataset as simul
import nipy.algorithms.statistics.empirical_pvalue as en
###############################################################################
# simulate the data
shape = (60, 60)
pos = 2 * np.array([[6, 7], [10, 10], [15, 10]])
ampli = np.array([3, 4, 4])
dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos,
ampli=ampli, width=10.0).squeeze()
fig = plt.figure(figsize=(12, 10))
plt.subplot(3, 3, 1)
plt.imshow(dataset, cmap=plt.cm.hot)
plt.colorbar()
plt.title('Raw data')
Beta = dataset.ravel().squeeze()
###############################################################################
# fit Beta's histogram with a Gamma-Gaussian mixture
gam_gaus_pp = en.gamma_gaussian_fit(Beta, Beta)
gam_gaus_pp = np.reshape(gam_gaus_pp, (shape[0], shape[1], 3))
plt.figure(fig.number)
plt.subplot(3, 3, 4)
plt.imshow(gam_gaus_pp[..., 0], cmap=plt.cm.hot)
plt.title('Gamma-Gaussian mixture,\n first component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 5)
plt.imshow(gam_gaus_pp[..., 1], cmap=plt.cm.hot)
plt.title('Gamma-Gaussian mixture,\n second component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 6)
plt.imshow(gam_gaus_pp[..., 2], cmap=plt.cm.hot)
plt.title('Gamma-Gaussian mixture,\n third component posterior proba.')
plt.colorbar()
###############################################################################
# fit Beta's histogram with a mixture of Gaussians
alpha = 0.01
gaus_mix_pp = en.three_classes_GMM_fit(Beta, None, alpha, prior_strength=100)
gaus_mix_pp = np.reshape(gaus_mix_pp, (shape[0], shape[1], 3))
plt.figure(fig.number)
plt.subplot(3, 3, 7)
plt.imshow(gaus_mix_pp[..., 0], cmap=plt.cm.hot)
plt.title('Gaussian mixture,\n first component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 8)
plt.imshow(gaus_mix_pp[..., 1], cmap=plt.cm.hot)
plt.title('Gaussian mixture,\n second component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 9)
plt.imshow(gaus_mix_pp[..., 2], cmap=plt.cm.hot)
plt.title('Gaussian mixture,\n third component posterior proba.')
plt.colorbar()
###############################################################################
# Fit the null mode of Beta with an empirical normal null
efdr = en.NormalEmpiricalNull(Beta)
emp_null_fdr = efdr.fdr(Beta)
emp_null_fdr = emp_null_fdr.reshape(shape)
plt.subplot(3, 3, 3)
plt.imshow(1 - emp_null_fdr, cmap=plt.cm.hot)
plt.colorbar()
plt.title('Empirical FDR\n ')
plt.show()
| bsd-3-clause |
nok/sklearn-porter | examples/estimator/classifier/NuSVC/java/basics_imported.pct.py | 1 | 1222 | # %% [markdown]
# # sklearn-porter
#
# Repository: [https://github.com/nok/sklearn-porter](https://github.com/nok/sklearn-porter)
#
# ## NuSVC
#
# Documentation: [sklearn.svm.NuSVC](http://scikit-learn.org/stable/modules/generated/sklearn.svm.NuSVC.html)
# %%
import sys
sys.path.append('../../../../..')
# %% [markdown]
# ### Load data
# %%
from sklearn.datasets import load_iris
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
print(X.shape, y.shape)
# %% [markdown]
# ### Train classifier
# %%
from sklearn import svm
clf = svm.NuSVC(gamma=0.001, kernel='rbf', random_state=0)
clf.fit(X, y)
# %% [markdown]
# ### Transpile classifier
# %%
from sklearn_porter import Porter
porter = Porter(clf, language='java')
output = porter.export(export_data=True)
print(output)
# %% [markdown]
# ### Run classification in Java
# %%
# Save classifier:
# with open('NuSVC.java', 'w') as f:
# f.write(output)
# Check model data:
# $ cat data.json
# Download dependencies:
# $ wget -O gson.jar http://central.maven.org/maven2/com/google/code/gson/gson/2.8.5/gson-2.8.5.jar
# Compile model:
# $ javac -cp .:gson.jar NuSVC.java
# Run classification:
# $ java -cp .:gson.jar NuSVC data.json 1 2 3 4
| mit |
taknevski/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ThomasBrouwer/BNMTF | plots/missing_values/plot_nmf_missing_values_predictions.py | 1 | 78933 | """
Plot the performances of the many different NMF algorithms in a single graph.
We plot the average performance across all 10 attempts for different fractions:
[0.1, 0.2, ..., 0.9].
We use a dataset of I=100, J=80, K=10, with unit mean priors and zero mean unit
variance noise.
We have the following methods:
- VB NMF
- Gibbs NMF
- ICM NMF
- Non-probabilistic NMF
"""
import matplotlib.pyplot as plt
metrics = ['MSE']#['MSE','R^2','Rp']
MSE_max = 15
fractions_unknown = [ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9 ] #[0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95] #
# VB NMF
vb_all_performances = {'R^2': [[0.9630856037244127, 0.9670215107562922, 0.9661045051134491, 0.9722018575258015, 0.9675999313577555, 0.9714297004377326, 0.9608472667855322, 0.9730333544231496, 0.9666510063942921, 0.9603541051163762], [0.9665170350567773, 0.964946197097694, 0.9663350781598471, 0.9627130095757848, 0.9616497277432379, 0.9640427240029992, 0.9678159049304254, 0.9669349000437315, 0.9641029182026513, 0.9685062857795189], [0.959773536111611, 0.9619285393512471, 0.9612240892216553, 0.9614987452100451, 0.9584291900683835, 0.9625229889130691, 0.9613925806791348, 0.9635766555104377, 0.9619711274413051, 0.9637773429736658], [0.9582224235087118, 0.9591420955113374, 0.9595049864880968, 0.9589604749797365, 0.9590359277212913, 0.9585875441047893, 0.960902037779746, 0.9614427654183628, 0.9609469576204902, 0.9589845126561516], [0.9527508903860804, 0.9532794041128106, 0.9514561283382098, 0.9542335555606037, 0.9566239021533259, 0.9540670856579738, 0.9545119685902426, 0.9565341570138918, 0.9549420865598607, 0.9552309386352669], [0.944824160627708, 0.9443509173528474, 0.9417312447125505, 0.9438389652711955, 0.9427625876422, 0.9425193651599165, 0.9418179180323941, 0.9424462796553271, 0.9420853896708117, 0.9465518001065166], [0.9001951862432918, 0.9183769138373294, 0.9166251929599342, 0.9069886075247062, 0.9115460497313663, 0.9151656497212621, 0.9123698613361313, 0.9124628297593562, 0.9092181617299763, 0.9196836026740838], [0.5718452222629703, 0.6583451513873824, 0.556741155657702, 0.583570843165066, 0.5104570853808321, 0.6360167602951334, 0.6512768325827929, 0.5214192895053769, 0.6275751203453768, 0.564905893846642], [0.5027844119329963, 0.4933257161288198, 0.4496685398864789, 0.4746077863315795, 0.4522656764387484, 0.5368917178074404, 0.4755783852096349, 0.4087673131892968, 0.4898747418380873, 0.4562544542743766]], 'MSE': [[1.4292554648676139, 1.3974051235158393, 1.29440666612722, 1.3217147142340593, 1.3208653575825622, 1.2551131044832673, 1.3757415666406905, 1.3065565812238904, 1.2042610271487089, 1.353798372612641], [1.3515803752693407, 1.3733054586267974, 1.3605784424251741, 1.5082085923523942, 1.412474232111794, 1.3422121840192229, 1.3633847814127587, 1.3403919024877198, 1.4268521219705763, 1.3289609470877093], [1.5502007005526373, 1.4662981005339264, 1.4619454920602812, 1.4335526755575416, 1.4595506807654832, 1.4625057118999791, 1.5535336837966451, 1.4382786571200279, 1.5083283919668102, 1.3930085927285758], [1.5541487972104295, 1.5713266193529145, 1.5946890731711545, 1.6056428426898903, 1.6251711444313219, 1.6215805013925217, 1.6220695413856037, 1.5378342018350912, 1.5531413220895049, 1.5350785451061721], [1.8117382277125307, 1.8477089630749928, 1.8897308174736871, 1.8201218699708099, 1.7956362330203772, 1.752797709484893, 1.7216079538498541, 1.6453618999158146, 1.8054631354026731, 1.8195334536523358], [2.184303640722197, 2.1949240106826875, 2.2056897515984528, 2.2641485953679252, 2.3428786178800967, 2.2597863299291139, 2.302011581068792, 2.1774461811293935, 2.3360642336198585, 2.1121378157470256], [3.9794794510337508, 3.2397568599609925, 3.3357028761301137, 3.7144139551688489, 3.4156036936427556, 3.3906177543890812, 3.3771166204052192, 3.386053503948375, 3.4216546182019933, 3.2108561252818357], [16.679921188539794, 13.650105558808882, 16.988458368541757, 16.29013292645822, 18.769667736252153, 14.529014512907999, 14.194886291079053, 18.888984430976617, 14.368616642611762, 17.500043934952473], [19.728859852733237, 20.0357738274881, 21.614472803471031, 20.376215739188623, 21.481467397245979, 
18.315206156715181, 20.793134858429365, 23.458756395361693, 20.374177558979476, 21.495922836713685]], 'Rp': [[0.98137357044894324, 0.98356969304565223, 0.98307183949334243, 0.986023974645173, 0.983791401261639, 0.98567445089057881, 0.98024941158262702, 0.98645450391382405, 0.98319465746836576, 0.98020000016184972], [0.98313446665444237, 0.98232877010194075, 0.98304226090514368, 0.98118639812951347, 0.98065246443942522, 0.98185897615701179, 0.98377658992615691, 0.98333144836210784, 0.98199493342705979, 0.98412744103150163], [0.97975501823888478, 0.98088479147381558, 0.98045650434671505, 0.98057262460682237, 0.97902209539002805, 0.98118645391138437, 0.98053707911525445, 0.9816498220269525, 0.98084506735664256, 0.98174740424723672], [0.97890361614299271, 0.97939109918387257, 0.9795531120627371, 0.97932255725184125, 0.9793042901481851, 0.97914786008990751, 0.98025799335713648, 0.98055794984324751, 0.98029105475801981, 0.9792854032132573], [0.97613493661568473, 0.97641827119444946, 0.97556467703224747, 0.97687271912817053, 0.97817876610563415, 0.97683425927468293, 0.97701341756789306, 0.97808502827763166, 0.97723236000443159, 0.97738278258968225], [0.97203460912583317, 0.97183443904940936, 0.97053747482518804, 0.97151838581568295, 0.97096119919381663, 0.97090027611260332, 0.97050254907300826, 0.97086349701148777, 0.97066332933436827, 0.97297703058961549], [0.94884176188711122, 0.95871692564457744, 0.9574286431553255, 0.95245998639735729, 0.95482371817969269, 0.95665745045296158, 0.95521411307579607, 0.95532060353134696, 0.95377749055837036, 0.95903245723711927], [0.77169610839500669, 0.81194990763251429, 0.77315400488399622, 0.77421630884784298, 0.75639479857647018, 0.80636943929628646, 0.80820079878518392, 0.74909538974484358, 0.80153421908357991, 0.76058696280751781], [0.73902171116167437, 0.7298352820200974, 0.73032526666911179, 0.73544281542769874, 0.73313362065671095, 0.75912748936719998, 0.73653668038292519, 0.69850055406951517, 0.73406943170778083, 0.73171766157704843]]}
vb_average_performances = {'R^2': [0.9668328841634795, 0.9653563780592667, 0.9616094795480554, 0.9595729725788713, 0.9543630117008268, 0.9432928628231467, 0.9122632055517437, 0.5882153354429274, 0.4740018743037459], 'MSE': [1.3259117978436494, 1.3807949037763489, 1.4727202686981908, 1.5820682588664607, 1.7909700263557968, 2.2379390757745541, 3.4471255458162964, 16.185983159112869, 20.767398742632636], 'Rp': [0.9833603502911995, 0.98254337491343047, 0.98066568607137372, 0.97960149360511983, 0.97697172177905078, 0.97127927901310118, 0.95522731501196601, 0.78131979380532424, 0.73277105130397613]}
#vb_all_performances = {'R^2': [[0.9713589349246919, 0.9613653056828934, 0.9578459394828349, 0.9695090586821206, 0.974670647338806, 0.9661349858322029, 0.9600936184646388, 0.9633704324461373, 0.9737122601556497, 0.9717431881480445], [0.9636798722085953, 0.97135537554884, 0.9660213915797089, 0.962658616403802, 0.9651549570032799, 0.963956989696327, 0.9636075202791187, 0.9667223946359815, 0.9625122170491301, 0.9646179966903984], [0.9631681612672657, 0.963581772202902, 0.9667238211581988, 0.9668039584771945, 0.9674038725039209, 0.9645805149058847, 0.9648104650092288, 0.9616935393190678, 0.9626685313481125, 0.9697202627193672], [0.9671166046555666, 0.9628341828882709, 0.9640158575346893, 0.9623508936358526, 0.9659872966916291, 0.9654029011491398, 0.965872800499762, 0.9657943432312635, 0.9650116518556658, 0.9658644455289328], [0.9644851508579957, 0.9601224476511965, 0.9643609599375405, 0.9642784255104482, 0.964677615606015, 0.961131739490249, 0.9629087019847005, 0.9651886910576233, 0.9654204003053286, 0.9633760524687857], [0.9623314700856768, 0.9616822303300745, 0.9659837899155818, 0.9604879597515781, 0.9619071854679841, 0.9635218158906672, 0.9649744842100604, 0.961962513776393, 0.9629186905520082, 0.9599513049827448], [0.9612576270571351, 0.9619398655987964, 0.959041987887408, 0.9625742339291395, 0.9600486785866935, 0.9602283663399814, 0.9603668292526074, 0.9631444414963682, 0.9595706409987232, 0.9625800423510145], [0.960913699284826, 0.9614263418846921, 0.9589156886534647, 0.9579184707193593, 0.9594025708805531, 0.9617102362198126, 0.9600787257333216, 0.9599811804785796, 0.9563585029920976, 0.9585612475532661], [0.9548292126000331, 0.9566978652840079, 0.9588999561180335, 0.9583250432302336, 0.9558489169256413, 0.955232406842699, 0.9570192368406675, 0.9562225906844626, 0.9537347696486741, 0.955159603503791], [0.954596381822336, 0.9532049971721274, 0.951292005914273, 0.9533452063656485, 0.9526323008848665, 0.9546575448804012, 0.9519517929087362, 0.953310246448709, 0.955420636330911, 0.9556176303726336], [0.9490518291611395, 0.952966821398839, 0.9459918952684331, 0.953993698118477, 0.9489294200636649, 0.9505020032172347, 0.9461592371515639, 0.9463056187328195, 0.9500945402948587, 0.949867212982645], [0.9447508499446656, 0.9369011128772314, 0.9416853892447595, 0.9440380962727131, 0.9421796228759396, 0.945259776883033, 0.9418066778687182, 0.9376704527538923, 0.9432597955726773, 0.9458203908359556], [0.9343641388448143, 0.9295257257157101, 0.9305326701701697, 0.9299840211130274, 0.9329627500122584, 0.9320319562092831, 0.9300686214971303, 0.935057754847032, 0.9301712824853379, 0.9263833325309817], [0.9119584811644569, 0.902746781686113, 0.9060856974479864, 0.9121485563904951, 0.912187647777133, 0.9213757261418238, 0.909734339847664, 0.9128096693034096, 0.8981755637060735, 0.9131673314224751], [0.7321141078876212, 0.7095403595249552, 0.753534642734358, 0.815981950465259, 0.851057362097303, 0.7994545657659315, 0.755341261477482, 0.8307025070230891, 0.8415425598499783, 0.8569854244415853], [0.6355682975476671, 0.7132235922533652, 0.6347015190450611, 0.6021796211481596, 0.6551333901246257, 0.6044143819130556, 0.6017566587926781, 0.7109559223307017, 0.6687174756435141, 0.561748011846501], [0.5697266450676727, 0.5435662492183205, 0.572085730372485, 0.5275580939033262, 0.5790898824688919, 0.5508874661391778, 0.5940182584666973, 0.4629211088331898, 0.5779401153771142, 0.5750962316606895], [0.4799035799068715, 0.4434362812948748, 0.41042093645597677, 0.4640938757303553, 0.43240182637463576, 
0.44695231255755263, 0.46151924737204775, 0.5094719052592487, 0.4409507746888591, 0.474853153012351], [-1989.1302053590314, -2017.534303892534, -2029.687563047177, -2014.6237301607932, -1999.9876502103364, -2003.1266006663122, -2019.2171720762249, -2039.5850761787626, -1995.0222599936974, -2046.2012340183178]], 'MSE': [[1.248306016424114, 1.3639637042351636, 1.4615061821851159, 1.270220834173261, 1.3285420088126207, 1.1115612700335202, 1.2621059060951434, 1.441149932049032, 1.0995771190145662, 1.2715740577226682], [1.3606720062233268, 1.1930686411959273, 1.4432712928156797, 1.3625510365287716, 1.4779905818824255, 1.3720270655597424, 1.4370423262600216, 1.3592648784809098, 1.3362291076053521, 1.2966520821163181], [1.3019746354986228, 1.375300879109884, 1.3353978694134283, 1.3713291559183753, 1.3846738697310765, 1.4319870661798664, 1.3953103539131375, 1.3806480867555377, 1.3717084832766493, 1.3197313167768396], [1.3613183211950894, 1.3800155951279354, 1.3934226104661389, 1.506273015204818, 1.3574322714287388, 1.3460337631899888, 1.3735199093575177, 1.3600477031654983, 1.3911697453557736, 1.3158261740635258], [1.42690979181699, 1.4348139667582591, 1.3774046570586123, 1.4228705324296926, 1.4282392124086625, 1.4706569591686032, 1.4532882025572651, 1.4557759507682893, 1.4123182727570736, 1.4478115993504692], [1.4733748800470998, 1.4850713837965233, 1.4001507336840686, 1.4976759649218743, 1.5804973305271042, 1.4267969836653205, 1.4169610739945677, 1.4950450874625369, 1.4373417200732512, 1.4757978675159358], [1.5525623416076484, 1.5532768024032446, 1.5985956419760496, 1.4908285461208506, 1.5181204044386574, 1.5452250085406725, 1.5401348826171193, 1.5085775214879411, 1.5778535983313136, 1.4960758448303126], [1.5921519766961223, 1.5211939936487318, 1.6152609851613104, 1.6040293755119763, 1.6181054634384393, 1.6040187154663175, 1.6471971772066494, 1.5273698262221223, 1.5768595602163771, 1.5870316713717796], [1.7415025035014071, 1.6626898981658662, 1.6223840721044611, 1.6567335180117055, 1.698788596844915, 1.7809190493967804, 1.697021993003516, 1.7325913489061617, 1.7634841496727116, 1.7133197692869881], [1.7829312666379749, 1.7971193247731543, 1.9498554597773075, 1.8035964093488321, 1.831085215148442, 1.8904227370485294, 1.8103526253249722, 1.9100941137115011, 1.7017486932754817, 1.7484119235254405], [2.0313506986519942, 1.8587563635952851, 2.1321806161656065, 1.9360345488784594, 1.9771847085154608, 1.99293128072348, 1.996361502262417, 2.0520245758745581, 2.0003563807312528, 1.9470063795085382], [2.2327330996122337, 2.4910703698533236, 2.2094616157278306, 2.2562281187949806, 2.2061165285630411, 2.12845925158918, 2.2260863179710868, 2.5079124195265319, 2.2100550757912627, 2.0597470777165738], [2.5786369120712682, 2.8685413163958815, 2.6630207085463051, 2.7757950373775055, 2.6628688045137858, 2.5735706292396801, 2.7540313249519182, 2.4783399941094113, 2.7982668604671348, 2.8824689855946595], [3.4916208302279692, 3.885697280998639, 3.7192975536786466, 3.443085925179028, 3.459986867660243, 3.1418645045787397, 3.6661622656385298, 3.4737064468057453, 4.0913162220527806, 3.349388234647984], [10.46687854324427, 11.422043470971676, 9.8518405311643704, 7.3050480075665041, 6.0354494551704008, 7.8653765651403305, 9.4466615063122799, 6.7351006993043461, 6.239211788325898, 5.5699318264077409], [14.000363723651681, 11.244076077206696, 14.364777128811573, 15.485877449630184, 13.308022524777178, 15.893117480876739, 15.548918984639361, 11.528225713588379, 13.215088035490574, 17.261978654126732], [16.914051488405892, 
18.080680393943993, 16.868998539079605, 18.441020044134628, 16.276710402933141, 17.821022513366071, 15.827750989854616, 20.758199133277646, 16.80650010478616, 16.83741956643912], [20.319680508136759, 21.995204617907714, 23.173426617013448, 20.99867918726796, 22.291950893864488, 22.063894228568095, 21.073548140073839, 19.358218156216402, 22.155850698834183, 20.629071695361233], [79045.670014629955, 79983.788418772921, 80177.448825241634, 79169.478797008545, 78778.597792737433, 79805.399228529786, 79187.52155414423, 80477.357516681135, 78560.211874014494, 79305.423349242017]], 'Rp': [[0.98563868506659835, 0.98056012860060882, 0.97880263790093003, 0.98467993615313099, 0.98726650224875279, 0.98293086394454598, 0.97984463566517188, 0.98157453771910852, 0.98684609335279772, 0.98577040866584442], [0.98187220307897827, 0.98560781342835269, 0.98287962360330194, 0.98155853781840163, 0.98242975910011032, 0.98182693437610491, 0.98173011174569769, 0.98322407599653538, 0.98117715074285061, 0.98215915130152354], [0.9814581022214055, 0.98176300098095193, 0.9832551304497944, 0.98326729761209342, 0.98357238886775922, 0.98226537876624231, 0.98226639128713744, 0.98068177783201693, 0.98122417492239211, 0.98474494661009704], [0.98342392903890841, 0.98130546229235971, 0.98188774853654393, 0.98099519966937654, 0.98285854669185269, 0.98260101417415657, 0.98288421035585227, 0.9828371554860954, 0.98237897071357083, 0.98279287269754079], [0.98211998888356733, 0.97986465490742525, 0.98203437472924149, 0.98203588557901611, 0.9822441324207607, 0.98038605974725124, 0.98131548346342112, 0.98246392971232754, 0.98256669634616312, 0.98161289400497809], [0.98100499987139278, 0.98067253703981483, 0.98285945308456502, 0.9800483407599857, 0.98081049939083786, 0.98164668977623715, 0.9823599065675912, 0.98086746671278602, 0.98128854367651619, 0.97986161327888588], [0.98044058716529681, 0.98079837104939815, 0.97933323212200196, 0.98111013449699291, 0.97983789759432172, 0.97997132375257612, 0.98000089959612613, 0.98140901927533597, 0.97958657330670063, 0.98112096883800648], [0.98030346279322134, 0.98052377237857313, 0.97934408331994283, 0.97876696427722676, 0.97964775845597585, 0.98067263520866388, 0.97994520159034948, 0.97982099978442838, 0.9779657285510831, 0.97907200911413961], [0.97717252405613009, 0.97815072063021613, 0.97934842116690168, 0.97894314123805404, 0.97772815605099561, 0.97736920642334035, 0.9783390738562483, 0.97790570511222275, 0.97663680247657769, 0.97748522316162512], [0.97711992660509395, 0.97633424973114447, 0.97546075956081491, 0.97640999208485091, 0.9760848177281618, 0.9770690728555298, 0.97572235786829187, 0.97640568363207658, 0.97756522474543395, 0.97757046232520972], [0.97423012861039004, 0.97627205032411746, 0.97263747446758864, 0.97678798485983642, 0.97417983724229718, 0.97499143990263304, 0.97280591494362423, 0.97289009297203832, 0.97476320232225377, 0.97465301464423693], [0.97201230439845776, 0.96832023028167713, 0.97053043728477384, 0.97162432822620382, 0.97067490095952924, 0.97228385113681226, 0.97060793094420794, 0.96836211946584805, 0.97122672894594864, 0.97254317623142672], [0.96662884961191886, 0.9642183636022611, 0.96468869520493561, 0.96436624024254569, 0.96610924185993996, 0.96557743411901165, 0.96441599377912368, 0.96700362798217065, 0.96477712034989815, 0.96249285541534446], [0.95523749060178353, 0.95045609043489188, 0.95192385382007727, 0.95522036675374911, 0.95518560549750986, 0.96022363884082684, 0.95390329041645183, 0.95564704992309768, 0.94799666303703578, 0.95609518997350273], 
[0.86229033464672655, 0.84529019492980828, 0.87189712879979142, 0.90463284588723702, 0.92286027820214656, 0.89514871251494665, 0.87368687458810135, 0.91210837640577436, 0.91757899135156384, 0.92608878833755426], [0.80890924604900394, 0.84639822900961426, 0.79831596081883682, 0.78976270079796362, 0.81471582821700217, 0.78433497335980673, 0.78916391908995842, 0.8434583933849672, 0.81874089076734879, 0.76580354478191037], [0.76317960659181483, 0.74015536667393433, 0.7597001142387384, 0.73820541599343448, 0.76480514947240974, 0.74736586055023024, 0.77337965555622457, 0.71872109452842048, 0.76246806610976492, 0.75947771400393616], [0.73384253193646609, 0.70866500232433571, 0.69724575240287123, 0.72204999353520682, 0.71312041668735415, 0.70951208767898832, 0.72588346585145636, 0.74244687191704839, 0.71355126533933899, 0.72964201870648304], [-0.056349802928094467, 0.11042068385928523, -0.013485229804784732, 0.021755745176307125, 0.038005126018088309, 0.17507303882960071, -0.08720112410134212, 0.0021399334844361974, 0.13769771722837879, -0.038590338870275499]]}
#vb_average_performances = {'R^2': [0.966980437115802, 0.9650287331095182, 0.9651154898911143, 0.9650250977670772, 0.9635950184869883, 0.9625721444962769, 0.9610752713497867, 0.9595266664399972, 0.9561969601678243, 0.9536028743100644, 0.9493862276389674, 0.9423372165129583, 0.9311082253425745, 0.9100389794887631, 0.7946254741267562, 0.638839887064533, 0.5552889781507565, 0.45640038926527726, -2015.4115795603186], 'MSE': [1.2858507030745203, 1.3638769018668473, 1.3668061716573416, 1.3785059108555022, 1.4330089145073919, 1.4688713025688283, 1.5381250592353808, 1.5893218744939825, 1.7069434898894513, 1.8225617768571634, 1.9924187054907052, 2.2527869875146047, 2.7035540573267549, 3.5722126131468306, 8.0937542393607806, 14.185044577279907, 17.463235317622086, 21.405952474324412, 79449.089737100207], 'Rp': [0.98339144293174896, 0.98244653611918564, 0.98244985895498904, 0.98239651096562564, 0.98166440997941518, 0.98114200501586113, 0.98036090071967563, 0.97960626154736041, 0.97790789741723128, 0.97657425471366077, 0.97442111402890141, 0.97081860078748861, 0.96502784221671511, 0.95418892392989263, 0.89315825256636483, 0.80596036862764131, 0.75274580437189087, 0.71959594063795485, 0.02894657488915995]}
# Gibbs NMF
gibbs_all_performances = {'R^2': [[0.9619059001208281, 0.9640827667211137, 0.9694512746263138, 0.9701886721742091, 0.9657569043522852, 0.9645199588372723, 0.9634705713931656, 0.9676267917708213, 0.9675622254798245, 0.9684506981142136], [0.9661983256111081, 0.9616205514088321, 0.964782466321369, 0.9642198246274559, 0.9609647615999493, 0.9645080846532873, 0.9628567568565517, 0.9625100016209683, 0.9647052208065231, 0.9617419670924906], [0.9643234840870709, 0.9617430678118996, 0.961338418396636, 0.9621685540717345, 0.9590790976588984, 0.9630979205180834, 0.9648309654552717, 0.9610122836488321, 0.961987672033106, 0.9612220362972822], [0.9595101862277302, 0.9594776094947263, 0.957823871926474, 0.9570380281896163, 0.9565386172828394, 0.9598820325488254, 0.9578290274243525, 0.9568794205531495, 0.9614822869442783, 0.9590428076940245], [0.9522941300113166, 0.9592824357136986, 0.9523343094760195, 0.9530924121407341, 0.9545449371032484, 0.9552885193209901, 0.9535007755625815, 0.9533325771726907, 0.9538848182936318, 0.9526588158499125], [0.9431535719627517, 0.9430805152703715, 0.9438988157247572, 0.939609300028728, 0.9419858916360788, 0.9414720533278618, 0.9431853506983003, 0.9450629953350186, 0.9420390340603483, 0.940740567078019], [0.907184519966465, 0.8893815616999061, 0.8947153607127359, 0.9210263887880934, 0.9165479957564602, 0.9011617012566961, 0.914193937886436, 0.9096273664291488, 0.8954458717355149, 0.9141336520293362], [0.7156007465858046, 0.6608549871190714, 0.5081403620663952, 0.5878899388839927, 0.569112134184095, 0.5922612076738332, 0.6788791286000178, 0.6756450333289706, 0.6368380147278165, 0.6412391457100886], [-0.03883065769894101, 0.39937236004066956, 0.28768810475157225, 0.09263549338150567, 0.3168058803008834, -0.09290240474776601, 0.35323399570551717, 0.3214323519141459, 0.4174905865373635, 0.33869715838260983]], 'MSE': [[1.3435161026525739, 1.422961444249444, 1.3631881946753663, 1.2758401195570195, 1.3723055080209405, 1.3591839088535866, 1.3073380616795633, 1.2759250524622099, 1.3171618674650449, 1.3008268160586309], [1.4052993493654045, 1.4061825751225325, 1.4145324544645925, 1.416226609062057, 1.3686757539628065, 1.3963021810316414, 1.3295080392294472, 1.413876209948091, 1.3895909892824541, 1.419473598871462], [1.4527645268465443, 1.4826447252288502, 1.4254506857209015, 1.4201453258828076, 1.5561327664672613, 1.3896139933122358, 1.4093211449035519, 1.496694920695091, 1.5080125674525022, 1.4776551431702634], [1.6273693364025781, 1.5613122281634206, 1.6454573438972488, 1.5741174307843806, 1.6605585104955143, 1.5484879861149485, 1.5673643158482429, 1.602940012847651, 1.583455906458755, 1.6025335487371899], [1.8894807573583676, 1.7080057208270207, 1.8633500585485623, 1.7974722582784888, 1.8347187228173907, 1.7174347001477308, 1.8581357148871396, 1.929932788496548, 1.8084403552739736, 1.913959453942534], [2.2776321156547823, 2.1936042177364317, 2.2241365566054401, 2.3483636322993302, 2.3154926960962983, 2.2067045058831347, 2.2426850409475771, 2.1446587159580708, 2.2992147876063465, 2.2943319870938215], [3.7685336471769131, 4.3642248206791576, 4.1420738260722327, 3.1913847733566691, 3.16274898450681, 3.8163163270379021, 3.4308795689453064, 3.5367105688459781, 3.969659603845769, 3.4073620151958273], [11.059472488515201, 13.473029553247274, 19.291326753948525, 15.885738351988971, 16.899217668762873, 15.993607044250318, 12.602174595100042, 12.64226363202151, 14.496466797700567, 14.251389800630605], [40.67778578143124, 23.597337597218569, 27.599872865640666, 35.191150325252806, 
27.086630055043276, 43.222847307391774, 25.432423502446227, 26.565062181981109, 22.777538269386849, 26.061386510100139]], 'Rp': [[0.9808300176523761, 0.98191012955331602, 0.9846447380668788, 0.98502372410143113, 0.98295436323656427, 0.98218222196692218, 0.98158559788405897, 0.98384206390239581, 0.98365853961110095, 0.98422252322658887], [0.98305770886688504, 0.98070818786031999, 0.98237072800530101, 0.98206139647059831, 0.98049188373631291, 0.98216731690336034, 0.9814417415253478, 0.98131137814915259, 0.98232864345704773, 0.98082516803012287], [0.9820606197511198, 0.9807351745160261, 0.98051949001280969, 0.98101659543957065, 0.97957670034473365, 0.98148427289695284, 0.98237543663511029, 0.98054936315594643, 0.98081102068623438, 0.98044887461040298], [0.97969381119965981, 0.97981830085026567, 0.97887161359914421, 0.97844051123524944, 0.97818134826056602, 0.97981148323928802, 0.97895143393769946, 0.97837373509892023, 0.98068194861727975, 0.97933890222115283], [0.97596914606319907, 0.97948998779897789, 0.97608484457282485, 0.97631491477655108, 0.97722559468427406, 0.97745619114463023, 0.97664134200129493, 0.97653440518293821, 0.97676845964285663, 0.9760570600875722], [0.97156149840243344, 0.97130590105423775, 0.97183373860842126, 0.96992716504553211, 0.97100065017491433, 0.97049381180634986, 0.97126367292558591, 0.97259156409279002, 0.97088110253196591, 0.97014609337518065], [0.95278667585694221, 0.94408340080594666, 0.94637016462172452, 0.96041168374829489, 0.95772863817233489, 0.95008710901536975, 0.95697256258891428, 0.95486074666490039, 0.94714480274787571, 0.95637229731612861], [0.85582341403166551, 0.83015757498009135, 0.76494459314321051, 0.80433937728497462, 0.79221780577170087, 0.79481080120454572, 0.84458468684503507, 0.83521337317818112, 0.82050034724166165, 0.81678073214726177], [0.67367981676225963, 0.7433971381896638, 0.70087233632151957, 0.68318666823004459, 0.69454459159521076, 0.66938293877487642, 0.73835592140083472, 0.71422274278175091, 0.74078332437265537, 0.73012959685238132]]}
gibbs_average_performances = {'R^2': [0.9663015763590048, 0.9634107960598536, 0.9620803499978814, 0.9585503888286014, 0.9540213730644822, 0.9424228095122235, 0.9063418356260794, 0.6266460698880085, 0.23956228685675604], 'MSE': [1.3338247075674379, 1.3959667760340486, 1.461843579968001, 1.5973596619749928, 1.8320930530577755, 2.2546824255881233, 3.6789894135662573, 14.659468668616592, 29.821203439589265], 'Rp': [0.9830853919201632, 0.98167641530044492, 0.98095775480489067, 0.97921630882592259, 0.97685419459551182, 0.97110051980174106, 0.95268180815384329, 0.81593727058283272, 0.70885550752811977]}
#gibbs_all_performances = {'R^2': [[0.9607798929114855, 0.9710272629142306, 0.9750606939641251, 0.9672150629247449, 0.965529590194902, 0.9641499516462126, 0.9632815412204784, 0.9708360780579849, 0.9698006738901898, 0.9684055872227426], [0.958783020353138, 0.9600937018201786, 0.9711834403123623, 0.9646057288699935, 0.9658883218986087, 0.9646170890032887, 0.9646037868340434, 0.9680218095641087, 0.9621463784607518, 0.966234030714299], [0.968220907778332, 0.963382424925033, 0.9675733709969558, 0.9638559578736655, 0.963471120594757, 0.9641918044362111, 0.9656355289841441, 0.9666580675473812, 0.9636819283324142, 0.9677779963243166], [0.96432045175121, 0.9633443743390868, 0.9619259384705858, 0.9614971727884671, 0.964528820649247, 0.9634693185935134, 0.9652524108655722, 0.9622239456226049, 0.9659040872785305, 0.9668480303403203], [0.9647041366166297, 0.9668953847011753, 0.9662440295906, 0.9673312217811363, 0.9627950232313067, 0.9624941576277093, 0.9655012231746964, 0.9663883488282061, 0.9671079372208565, 0.9620711327340062], [0.9669801146721941, 0.9605349942424394, 0.9633473546252095, 0.9604757775545533, 0.9606155774156504, 0.9633595014904399, 0.9621695532846865, 0.9634645901438151, 0.9624394185485761, 0.9605289183049577], [0.9608327877951591, 0.9594547997567079, 0.9583594659448513, 0.9615726121305787, 0.9616016015845456, 0.9600440306226663, 0.9617317731399933, 0.9599983974373109, 0.962257869451462, 0.9611810957867228], [0.9564144539704932, 0.9595308515885782, 0.958957133122521, 0.9582533540027679, 0.9581616921473787, 0.9588321908268644, 0.9593878521800903, 0.9590852260068806, 0.9574007813560488, 0.9593998663109279], [0.9567117071744511, 0.9563999156667284, 0.9551903282324833, 0.9555694779978903, 0.9565889919487213, 0.9559841591620458, 0.9572562712554429, 0.956051684117678, 0.9548308274355927, 0.9554840299491005], [0.9555719456091492, 0.9562565028187119, 0.9546247227762248, 0.9518694304019937, 0.9543604732125919, 0.9530417932124018, 0.9529946368706319, 0.9502398694871862, 0.9532551222347229, 0.9540070721314693], [0.9497182447692392, 0.9476294233671535, 0.9474571088688516, 0.9473156766713224, 0.9452039213422637, 0.9513290905060965, 0.9543599945890949, 0.9523191347425972, 0.9494928218577068, 0.9492799527218023], [0.9399959986808165, 0.9444398182395941, 0.9431316612802955, 0.9405073887465492, 0.9438968819291124, 0.9450876736014285, 0.9394374574558161, 0.9451960529071889, 0.9422560857474985, 0.9401280167641233], [0.9287249220459928, 0.9314507416329076, 0.9340618219281188, 0.9315396518691099, 0.9362969077757889, 0.9328864996829905, 0.9263505369676939, 0.9324569173536664, 0.9300960608093917, 0.9290000864913832], [0.9181537731183478, 0.9186640787529018, 0.9099141823338749, 0.9088622073409541, 0.9122256599092969, 0.9210754425703919, 0.9115904559494141, 0.9181114058908746, 0.9097023251820305, 0.9110740634874674], [0.8104133515636324, 0.8672236218036454, 0.858517312493944, 0.8117413197235543, 0.8604966017602202, 0.826928170055675, 0.7695828636001907, 0.8332203356953722, 0.8380762208335375, 0.8705942814755447], [0.6426762685886962, 0.6208739529913971, 0.6239918909767616, 0.6250215207381622, 0.7258254468047323, 0.5355763592541853, 0.655168943739147, 0.6736704012544106, 0.637964716354918, 0.7181238791161657], [0.43133889124167024, 0.45452840674560935, 0.4795852486469693, 0.5176213815788429, 0.5461386790344832, 0.46295223222354354, 0.5232155927431766, 0.5371515518942449, 0.4909807412911521, 0.37997903484056306], [0.3764939754057922, 0.35904471953490524, 0.26370223547256066, 0.013703651845309528, 
0.2674811872253342, 0.220513016113406, 0.3237872491510495, 0.2866741540656138, 0.40531998885182885, 0.2711600238985731], [-1474.4391503338761, -1605.436715365241, -1529.1494451947392, -1535.6185056145814, -1532.2459246073408, -1655.6453978078657, -1586.4680356523002, -1451.8218156379273, -1465.8944705429644, -1505.763112758571]], 'MSE': [[1.4265218151159913, 1.1169104857429066, 1.2299914545050663, 1.2215984837111211, 1.0827960485121455, 1.4030625795543161, 1.2948758596814849, 1.2185063697914134, 1.2094372159714846, 1.2066613741914842], [1.3532913369654509, 1.3767451192516642, 1.2159299412967695, 1.3395149469839536, 1.3373456607495484, 1.3479583158037332, 1.4476464910538573, 1.307207709801653, 1.5434223707765653, 1.238181144444076], [1.3129058791080943, 1.3777445055047692, 1.2969347605401593, 1.4146850704595126, 1.3399063394497632, 1.4244593189954367, 1.3948795353882708, 1.317092623972355, 1.4646213504771111, 1.3536571115480875], [1.4014052604547234, 1.3637353672506221, 1.4738258610985695, 1.3775485810731458, 1.4223712496274949, 1.4014603109717978, 1.2809712776366984, 1.3617889815704118, 1.4015574155791841, 1.385801960582308], [1.3946533978672526, 1.4396574799511013, 1.3731205743145984, 1.3421064061444721, 1.4626710246060286, 1.4453565470808027, 1.4586922134369515, 1.3958644432259273, 1.4501492258134403, 1.4381394028977419], [1.3791414025606923, 1.5282873252267251, 1.4656681493862389, 1.4913535513941942, 1.5375502609509026, 1.4923293045156587, 1.4800363530063427, 1.4687222028615801, 1.5146642177015128, 1.5212463682335486], [1.5212153792562539, 1.5467850911434078, 1.6105275460407744, 1.5460464246960959, 1.5839625375974249, 1.5944265802665121, 1.4966763697135286, 1.5361069618907577, 1.5212684478682055, 1.5339905331331352], [1.6251216903436463, 1.5449333459208798, 1.5714218282690056, 1.5891372807568833, 1.5746046830728642, 1.6309696121429602, 1.5461396531387437, 1.5688599181958427, 1.6147199733356299, 1.63011790943587], [1.7497683084275484, 1.6914956292599357, 1.6758963832907352, 1.6656213008298097, 1.7171651833637918, 1.6604015034906547, 1.7161338613509487, 1.6854499196207176, 1.7465820295948289, 1.7158902442179045], [1.7853348949657128, 1.7008567899341629, 1.7289405318146964, 1.9554009959262144, 1.8035444799892026, 1.8734777806146252, 1.8688770773487917, 1.9088551995838507, 1.8108802700447448, 1.8421284725411542], [1.9593391931419046, 2.0531676571219095, 2.0281192281263785, 1.9742399113307398, 2.14149405214093, 1.9083743034133873, 1.8610728454238608, 1.8837496930054634, 1.9963318128905476, 1.99810055635224], [2.3655609398243129, 2.1541065072038799, 2.1242376897984925, 2.3080877544715874, 2.2571465693629063, 2.2368911391292965, 2.3248668038036659, 2.1873752925036629, 2.2218184941823202, 2.2685772224958338], [2.8742275884612449, 2.7535649620221272, 2.5239329174059391, 2.6682973586998973, 2.5790790037962892, 2.6524227340705036, 3.0055276225779837, 2.7207273140998853, 2.7003363332658168, 2.8645794175370858], [3.3121156039227375, 3.2635734783792274, 3.4274337201212166, 3.6700211738856425, 3.3990372421250132, 3.0707965577540253, 3.5606322963208483, 3.1652274324816103, 3.6145851507035989, 3.5565887904851299], [7.6039021091476755, 5.2590515583348596, 5.5285460929183197, 7.2867151313709657, 5.3027388107316611, 6.9931290602916425, 9.044230982389184, 6.6406875560913177, 6.5016200314645838, 4.9906921677256326], [14.176568526167319, 14.700772063343193, 14.46959846271398, 14.677467597401193, 10.773391423303778, 18.321932920591259, 13.406746672441495, 13.100763547623405, 14.135554476976957, 
10.918525356734516], [22.049461352086567, 21.024709634648083, 20.273079959083962, 18.959871011261654, 17.8133375950481, 21.086986825048751, 18.922448278219541, 17.889207827149946, 20.100888739729754, 24.539444413615282], [24.917386299268291, 25.365431767376592, 28.681102460709496, 38.214396096748693, 29.055468582813916, 31.18669137630981, 26.560906601069128, 27.972433274860496, 23.59834818984282, 28.244272446703096], [57379.536067098379, 62934.604411053842, 59631.300600612929, 60039.754750939937, 60259.875816399828, 65599.200157667947, 62828.979063864739, 57455.424774948115, 58025.283378618769, 58788.875565681439]], 'Rp': [[0.9803040551635479, 0.9855562469061756, 0.98751527872724376, 0.98361032178418961, 0.98269185410830273, 0.98209265265308587, 0.98167394421201504, 0.98532402498874017, 0.98479309800799952, 0.98411879466024876], [0.97936154819109189, 0.98000839071606127, 0.98555466852474027, 0.98219537799541401, 0.9828701381004008, 0.98216436654363137, 0.98216996007246815, 0.98390366083429093, 0.98092935797064995, 0.98331016803709215], [0.98401662148203206, 0.98160271501039231, 0.98365461273484533, 0.98183175602459338, 0.98157736847009458, 0.9819342101421209, 0.98269646306728597, 0.98328141140756464, 0.98179988175508659, 0.98388873560164081], [0.98203149617787011, 0.98163998630819593, 0.98082707354146137, 0.98065101952195755, 0.9821664116998402, 0.98189259369005577, 0.98251466872928017, 0.98095931331003661, 0.9828278387697349, 0.98329170117133402], [0.98240012738703553, 0.98339814021351413, 0.98308774723419856, 0.98356730102423195, 0.98137019276629667, 0.98108698510024483, 0.9827554325915564, 0.98306538360636064, 0.98343149985350609, 0.98105806951583985], [0.9834380188498083, 0.98013176527728574, 0.98170274981451633, 0.98027630007593147, 0.98021134467922111, 0.98163751864523507, 0.9809477452923584, 0.98171340707770116, 0.98110728336036379, 0.98013061564321791], [0.98033845106761752, 0.97957053908562253, 0.97899054444960931, 0.98065938385565499, 0.98071681525529031, 0.97993165089260681, 0.98070613485605529, 0.97989456069858849, 0.98120331176248576, 0.9804831937072146], [0.97803384097502466, 0.97964649916120161, 0.97948214432945524, 0.97913048821707649, 0.97888168035124845, 0.97923373149738058, 0.97952202437417857, 0.97934247744467351, 0.97855919579273931, 0.97953328523752781], [0.97815822291428189, 0.97821751641086596, 0.97741916597145795, 0.97763492668407903, 0.97817223043780432, 0.97778546567547375, 0.97855527700141964, 0.97782192238881516, 0.97727453305078038, 0.9776335519873105], [0.97755128253912971, 0.97797659944817772, 0.97706370306864798, 0.9758431098075353, 0.97694245879944397, 0.97643398982131369, 0.97666363098379472, 0.9749760041488863, 0.97673918926302739, 0.97701058242388683], [0.97462621954757045, 0.97355890628793751, 0.97384619739672817, 0.97339214757880654, 0.97232109547010248, 0.97541838925701552, 0.97699736681312499, 0.9759087285544904, 0.97460197173813556, 0.9748680659792609], [0.96986011110916537, 0.97196940819550748, 0.97136712033730077, 0.96986773639783053, 0.97178441714324604, 0.97237498117818288, 0.96956190357570138, 0.97230007098769733, 0.97092334539405623, 0.96988177815918575], [0.96401167775138186, 0.96536635546788097, 0.96689777343126448, 0.9661327840558771, 0.96786777182683026, 0.9664105153714635, 0.96268641873051186, 0.96623814685988874, 0.96466718846616173, 0.96440517740077025], [0.95858738660856868, 0.95938603131250189, 0.95474020030676254, 0.95443469228997324, 0.95564824622086297, 0.96014670693381321, 0.95502635440456574, 0.95841174703814758, 
0.95429814664553969, 0.95573136783293755], [0.90301942834367144, 0.93361473809222117, 0.93014908872659019, 0.90592629625834931, 0.92936740388450367, 0.91089609182958908, 0.88300391157171165, 0.91377740052404111, 0.91838341788498723, 0.9345848079077822], [0.82871796437788103, 0.80611983906883733, 0.81670759380710711, 0.811717889548212, 0.8621711545696521, 0.78610268609953249, 0.82872531588108045, 0.8339124467544019, 0.81785952866175304, 0.85469446659460013], [0.73315232971191358, 0.74956801813911678, 0.75927164499298005, 0.76882640571707583, 0.7743506975822273, 0.7606753341276481, 0.76758905782322739, 0.77858011483255207, 0.75864230035222946, 0.72392387479854758], [0.71930871207524516, 0.73608033976843845, 0.72434534585018517, 0.67195509532640385, 0.69514659247078403, 0.66425677781148462, 0.71622822481126081, 0.72560571066655899, 0.74530208198515702, 0.71357777738918571], [-0.092889293377691584, -0.044925867814768775, 0.038164496416365629, 0.015311988053378524, -0.012530071632152986, -0.0098182201600854804, 0.097551084339485478, 0.099165408145080985, 0.03373752248686477, -0.032157990434524421]]}
#gibbs_average_performances = {'R^2': [0.9676086334947096, 0.9646177307830772, 0.965444910779321, 0.9639314550699138, 0.9651532595506322, 0.9623915800282521, 0.9607034433649997, 0.9585423401512552, 0.9560067392940136, 0.9536221568755083, 0.9494105369436129, 0.9424077035352425, 0.9312864146557043, 0.9139373594535553, 0.8346794079005317, 0.6458893379818577, 0.48234917602402555, 0.2787880201564373, -1534.2482573515408], 'MSE': [1.2410361686777414, 1.350724303712727, 1.3696886495443561, 1.3870466265844954, 1.4200410715338314, 1.4878999135837396, 1.5491005871606096, 1.5896025894612325, 1.7024404363446877, 1.8278296492763157, 1.9803989252947365, 2.2448668412775961, 2.7342695251936773, 3.4040011446179053, 6.5151313500465831, 13.86813210472971, 20.265943563589165, 28.379643709570235, 60294.283458688602], 'Rp': [0.98376802712115496, 0.9822467636985841, 0.98262837756956567, 0.9818802102919767, 0.98252208792927842, 0.98112967487156388, 0.98024945856307466, 0.97913653673805057, 0.97786728125222877, 0.97672005503038439, 0.97455390886231696, 0.97098908724778732, 0.96546838093620302, 0.95664108795936742, 0.91627225850234473, 0.82467288853630583, 0.7574579778077517, 0.71118066581547035, 0.009160905602195216]}
# ICM NMF
icm_all_performances = {'R^2': [[0.9675308056342089, 0.9706731183201378, 0.963515388656442, 0.9641470395458833, 0.9655826279217266, 0.9672294102359171, 0.9646001028981867, 0.9640619445467564, 0.9690036553956349, 0.9654409469483706], [0.9639738759573419, 0.963567010099971, 0.9654223808085468, 0.9643410967733337, 0.9624560435500431, 0.964324471631967, 0.9623909262839379, 0.9608493186526555, 0.9657672670199301, 0.9659115228364663], [0.9614572511485779, 0.9589819094877396, 0.963989588472418, 0.9608121334638506, 0.9642719642530541, 0.9608971571013196, 0.9612212583750915, 0.9612404133396623, 0.9618299957652703, 0.961369171789454], [0.9585160253993189, 0.9575779061062479, 0.9574643720429868, 0.9594241418510507, 0.9584959767975174, 0.9604005841420141, 0.9589059353654282, 0.9604642942489454, 0.95750675995146, 0.9607169405608774], [0.9512913413870069, 0.9542372391958707, 0.9570930945184041, 0.9536261975478091, 0.9535052419653446, 0.9511312110680381, 0.9509401809556668, 0.9504006562363108, 0.9512952600303107, 0.9533992700930061], [0.9421897455772862, 0.9377710137581358, 0.93923072477646, 0.9359583884993236, 0.9401515756921959, 0.9160080327252482, 0.9388377747710379, 0.9333978218918502, 0.9410161118470081, 0.4172077771215297], [0.6492870470824728, 0.41808562676856553, 0.8601954384863069, 0.18391754468568988, -0.3712969781058011, 0.1928831001369935, -0.04476264055036028, -0.07822039466291453, 0.7782838803614673, 0.8471368345342142], [-15.154083055897928, -3.2260813954603647, -3.0404150913693346, -2.8912227984149217, -1.6365028486809656, -3.701950932661954, -1.3347890580260828, -2.9721167590862185, -7.579224246222399, -0.8305063766953791], [-25.229116347034754, -2.584717328359927, -7.805478147382384, -4.3858408057911555, -45.58291647127771, -172.62011291578443, -11.135481520827124, -38.9551194732589, -45.484597899013046, -9.811203434168704]], 'MSE': [[1.187157164954578, 1.3898297200725369, 1.3783972956361257, 1.3143989657901607, 1.3677641943629277, 1.32361845717388, 1.3648837705275227, 1.3285044801517296, 1.1829726301849688, 1.2651118784833626], [1.453364418735541, 1.3700096800001251, 1.3376849941391245, 1.4339828974782167, 1.3969112642935315, 1.4114103780266951, 1.4552342906907807, 1.5236834547871951, 1.4261476182236223, 1.396701158803284], [1.5405572085051735, 1.5127404138095704, 1.4583016848597918, 1.5515746462240012, 1.4437979737297169, 1.5164660159943102, 1.4769147682537309, 1.4750441757138248, 1.5148205392710512, 1.5329485512131671], [1.6332378436324435, 1.6091543761045901, 1.6674524973738523, 1.5849445704145741, 1.6275463811308035, 1.5545596959921908, 1.7123338748280075, 1.5426972993965666, 1.626169671913325, 1.5774155055271952], [1.8827083597062471, 1.8869800012441869, 1.6869415553799403, 1.8643145673603752, 1.8209797481971748, 1.8191245605478923, 1.8684620275369468, 1.8263604829944977, 1.9202701967828466, 1.8196651252973335], [2.2619641354569513, 2.4692946761188788, 2.3902065843598028, 2.4807743653310128, 2.3144651089404418, 3.4190015777729346, 2.4776173216749866, 2.6065406236207629, 2.4281860289525961, 23.157360984753783], [13.888138409142835, 23.191374771463085, 5.4875067563411362, 32.046162842779452, 52.300603873623579, 31.832906750343106, 41.306738372637341, 42.640943734195858, 8.7846347172410315, 6.0689159243055224], [637.82330632690832, 164.64775884872782, 160.14683783654888, 154.27762959286534, 104.33047904751766, 183.18459490341436, 90.908851265961275, 158.29703989937892, 340.46365883945168, 72.517089326534872], [1027.0193630787844, 140.01123141089118, 345.50548111476979, 
209.4833772439263, 1841.2300141903368, 6816.5173720613329, 476.1871633364039, 1591.571192212532, 1818.4092301141911, 425.908306211411]], 'Rp': [[0.98371254375777983, 0.9852593042200033, 0.98169025730922743, 0.9820463841118251, 0.98264745037432177, 0.98364597296075718, 0.98215363830320457, 0.98189268015836306, 0.98442356198749348, 0.98263394115834157], [0.98191812061810446, 0.98162072345629237, 0.98270050186274183, 0.98206503621597818, 0.98108147768777143, 0.98213101679579162, 0.98114539350026431, 0.98024769311959414, 0.98286605201274857, 0.98294943837519688], [0.98056513858036443, 0.97933317147454091, 0.98186393539653039, 0.98021947354660055, 0.98198582351158248, 0.98040588610494672, 0.98048303802774139, 0.98055718063417985, 0.98086247278375616, 0.98049684325918762], [0.97913294286247166, 0.9786355908419283, 0.97856675075222943, 0.97967119266141989, 0.97909324390033037, 0.98018288502510165, 0.97946117067749672, 0.98005418069212935, 0.97855095740841946, 0.98022701605169793], [0.97541613729030174, 0.97698358058682688, 0.97842381853910809, 0.97659228688440214, 0.97663770073844913, 0.97534140334396213, 0.97528835458281882, 0.97513083651584087, 0.97555595094597558, 0.97651242166614194], [0.97072674123090485, 0.96872680951692303, 0.96967332358728719, 0.96779800442128827, 0.96999790525567497, 0.95802255524500712, 0.96950122809024442, 0.96641199125903743, 0.97057469601659108, 0.77834325806882843], [0.8458462324193643, 0.76874646860456841, 0.93532706265827947, 0.73021663349220967, 0.66315121760061924, 0.71472380265694679, 0.6823375995997647, 0.69144435374124924, 0.89964626185006069, 0.92403445885840774], [0.31332946027253189, 0.39537928253773769, 0.47679860124063189, 0.39897732785199841, 0.4991530080173564, 0.45565956293722693, 0.4961447404344001, 0.45378913782676389, 0.33345827202983858, 0.5135021247128172], [0.3434072088157063, 0.47061044993748996, 0.39680929075293253, 0.39124671575428283, 0.34861242077073645, 0.22469325738464976, 0.29643158775236977, 0.28413749892097034, 0.28533432255364599, 0.34385929058618586]]}
icm_average_performances = {'R^2': [0.9661785040103265, 0.9639003913614193, 0.9616070843196438, 0.9589472936465846, 0.9526919692997768, 0.8841768966660075, 0.3435509458736634, -4.236689256251556, -36.35945843428981], 'MSE': [1.3102638557337791, 1.4205130155178114, 1.5023165977574338, 1.6135511716313551, 1.8395806625047442, 4.6005411406982146, 25.7547926152073, 206.65972458873088, 1469.1842730974581], 'Rp': [0.98301057343413178, 0.98187254536444846, 0.980677296331943, 0.9793575930873224, 0.97618824910938273, 0.94897765126917866, 0.78554740914814702, 0.43361915178613025, 0.33851420432289692]}
#icm_all_performances = {'R^2': [[0.9646588220320363, 0.9744017897369437, 0.9661210190894278, 0.969802848062672, 0.965476872595392, 0.9670624316916201, 0.9728664524056745, 0.971176375566901, 0.9648256397442762, 0.9649026044524933], [0.9640763008036166, 0.9626339752479472, 0.9681567935024334, 0.9661353174417285, 0.9662175675124474, 0.9715662939098224, 0.9707315218179083, 0.9699923600970786, 0.9653035655898666, 0.9664734894086203], [0.9633972797734844, 0.9613889477090745, 0.9633155400542408, 0.9631868017024905, 0.9650005051889149, 0.9683135728474723, 0.9646223689489576, 0.962812973503456, 0.9678537500615519, 0.9630758881678614], [0.9647906168171374, 0.9615626276121297, 0.9664017963616047, 0.965807030429661, 0.9635705064247597, 0.9651284643798599, 0.9661642561079591, 0.9611595050726959, 0.9648979122538076, 0.9656864637861943], [0.9617910761509388, 0.9622938519210792, 0.9664243504358435, 0.9654731800857685, 0.9662465103997169, 0.9642327939957898, 0.9658007830159676, 0.9637036898952889, 0.9657552613705543, 0.9664413893301579], [0.9612495027889931, 0.9603477783110483, 0.9585368175053959, 0.9638765137419028, 0.9622678858049218, 0.9623215799662406, 0.9608749263149422, 0.9627668392405861, 0.9619310420008448, 0.9621780756495445], [0.9585764566451669, 0.9619247686593589, 0.9582557644679749, 0.9588577800309206, 0.9592089991210807, 0.9585017000597045, 0.9603166347644169, 0.9589136483511163, 0.9618894579719319, 0.9625461136872147], [0.9573348772422919, 0.9606195868670749, 0.9576288002345882, 0.9586687487253401, 0.9600221001417386, 0.956582609039941, 0.9599676242758199, 0.9576461865633775, 0.9588797397181873, 0.9583114532248218], [0.957728108793864, 0.9570402091693019, 0.9575428135832431, 0.956450915408119, 0.9569389333676624, 0.951696208169665, 0.957591926883792, 0.9565914359033977, 0.9555384201874338, 0.9564214296003611], [0.9515852501929368, 0.953367242361147, 0.9505134991265246, 0.9546104749807054, 0.9549505688726565, 0.9537153287945578, 0.955091971488353, 0.9542244131658796, 0.9529869866197305, 0.9527748576523243], [0.9420434595756004, 0.9479026161921178, 0.9467033602490232, 0.9480673330148461, 0.9453125370917438, 0.9420166088826993, 0.9505653368810594, 0.9467512614422515, 0.9470082359084901, 0.9431031933098671], [0.9360494819384965, 0.936639463368596, 0.9351492199438556, 0.9399089558229732, 0.9447938065299957, 0.9384641662214507, 0.9394766190203735, 0.9376294934870604, 0.9417799891360793, 0.9419438114364511], [0.9024199640151825, 0.8987390371758867, 0.9234426785016546, 0.7990116016523392, 0.9160834584130289, 0.9277815400940939, 0.921553778305238, 0.9201685700731983, 0.7816528084852962, 0.9230727579545454], [0.5948346697949861, 0.38966937921254097, 0.7152509107408433, 0.5121347832324528, 0.6917203481806345, -0.19570932116933726, 0.6281907453334525, 0.5191308550817573, 0.20883268555770118, 0.7460564107414729], [-1.114305363686979, -0.14317123682778887, -0.8227083192787743, -2.5168276797134976, -2.8314145540478184, -1.7157689835918806, -1.4460036756536314, -4.432028363293397, -1.9542062524824177, -2.20537571931408], [-7.158008488200778, -2.149561560181636, -4.153230092342037, -6.59788743353105, -4.982813892375419, -18.821374948545802, -2.54708243533606, -1.0093264080486315, -7.538723904838152, -0.9224534688603399], [-10.638628539166355, -3.1353272699715955, -9.832468280750915, -7.748568416579909, -12.01180556851429, -2.521000468598625, -8.912678390492218, -17.053360366455575, -2.418925332065935, -10.203398167152656], [-16.71119840822288, -18.86304304649908, -17.490919492764306, -8.988213386773568, 
-2.2443277363559595, -53.63948069764641, -6.340076371868526, -11.586703960357593, -39.77090188489713, -2.670565527948003], [-2.9991157910674557, -1.1047812921283673, -4.252211807572713, -8.655115190125118, -1.941764250155218, -2.2501238622149757, -2.398908934598071, -2.9831664956574446, -3.0110616315482464, -4.392950526379743]], 'MSE': [[1.3068878553534384, 1.2728600658795628, 1.1694231900614449, 1.3435147321596586, 1.3241734835378016, 1.3111065171418457, 1.1146207618462132, 1.381435269976397, 1.4067231747631745, 1.2954293537465802], [1.4049645974251284, 1.3985004418947549, 1.3373578191091928, 1.3389832835693232, 1.342244514889221, 1.3235213165695416, 1.2251176863382529, 1.2318869420603697, 1.2871522152420181, 1.2675665938521545], [1.3043884852215519, 1.4109949764516914, 1.3652341396127343, 1.3752535094846476, 1.3227887368153155, 1.2405841708054504, 1.4325202373215173, 1.3990547413838681, 1.3408730928982637, 1.4019895862338529], [1.4211193839205623, 1.4364331602420395, 1.4269733463787011, 1.3285520324251072, 1.4037041392890501, 1.4636076896335206, 1.3288615089259281, 1.4434497506181614, 1.3805780591263934, 1.3228378322209524], [1.4585658158640977, 1.3825012038494837, 1.3653008971663836, 1.3781637335667971, 1.4144387982359425, 1.4922567535995772, 1.3787812370651165, 1.5098810096750064, 1.3705949802824897, 1.3961970465759497], [1.4704221482301882, 1.514866214725253, 1.5228499684576888, 1.4567995620940899, 1.5109131992575717, 1.4860704611643829, 1.5082706699447088, 1.4784484798791904, 1.5248415976500245, 1.5326369992621036], [1.6311607043367158, 1.5634287989271278, 1.5872903387842261, 1.6286343737620241, 1.5864632357890343, 1.5931842919910362, 1.6120479988091887, 1.5536546849286197, 1.4779829384686463, 1.4745732716012327], [1.7492716869018545, 1.5996959412372846, 1.7097206037792916, 1.6672119518139275, 1.5876010519427772, 1.6201291256301225, 1.5696675224718621, 1.6814412961487653, 1.6585153035908424, 1.6352960498206781], [1.635829933236604, 1.7229299569189627, 1.6704598913543416, 1.7304118927555308, 1.7103959581333288, 1.8471680210951695, 1.6837003659750991, 1.810366226226191, 1.6884044979868267, 1.6367092389810465], [1.8424004764078987, 1.8603294582801375, 1.8832443220917485, 1.7470688318892944, 1.7678447414557066, 1.8759421335604214, 1.8162861920282789, 1.7900390962903414, 1.9127546825574491, 1.7992695241359944], [2.2735614291301172, 1.9458790176624092, 2.0590688033100615, 2.0641272766922714, 2.158803019364933, 2.2183034337119163, 1.9181863312806529, 2.0154184079153441, 2.0695856789133931, 2.212505355254621], [2.5358924185739995, 2.3829145329484023, 2.4670439478279591, 2.3753226027967074, 2.273732662908436, 2.4063115088343094, 2.3405416824714407, 2.4547998920227805, 2.2221412751288998, 2.2527137635075767], [3.910399952057066, 3.9200786274483428, 3.0254036700789904, 7.8286585052955049, 3.3361952508472354, 2.8062706523382284, 3.133639101932423, 3.113728198725799, 8.5756422796414533, 3.0541640559658201], [16.087144060205272, 23.628572367305289, 10.888949694616919, 19.106037582501401, 12.392796432409028, 46.866483969850137, 14.836241984623172, 19.173276485046067, 30.494454509437169, 9.8426184534571615], [82.219177946016572, 45.776112953383866, 72.485356819877538, 138.11819497368788, 147.40119067961797, 105.94748683055944, 98.174591072050276, 216.39741676830656, 115.85268401894912, 124.06343350565874], [317.28165441221523, 122.88221954380145, 202.86406861012668, 294.3944546435381, 235.49554562020839, 780.91898466318412, 138.89855722772879, 81.531471360797568, 339.16242340534939, 
74.874626228073652], [463.10232594666491, 158.86301509202335, 419.21093847868559, 343.63845775052295, 515.65959093461845, 140.32594316500976, 387.02269589321122, 700.76981150741028, 135.83640865973368, 441.13465489904326], [694.97781814069515, 772.45229410477282, 735.35115794113813, 395.73542965618213, 126.42550944338964, 2112.1393628573542, 289.63773664141087, 491.17268192719689, 1596.9561179733985, 143.19256477611415], [157.47353560409803, 83.19569191263561, 203.67057231042682, 380.74356119105926, 116.13813453928844, 127.46342106389626, 134.89949620637577, 158.56858595065574, 158.34610177280038, 214.39919548358225]], 'Rp': [[0.98219200873993129, 0.98717601673482969, 0.98302969238037108, 0.98479690398735076, 0.98267937790726634, 0.98348259343575595, 0.98634724257835593, 0.98563229169003896, 0.98229527682325368, 0.98234635222274003], [0.98188599962825374, 0.98114449899603584, 0.98410129022071868, 0.98312659798589874, 0.98299690266885753, 0.98568089842924855, 0.98526785569134157, 0.98499399059373172, 0.9825120941300941, 0.98319452126783136], [0.98155089163587239, 0.98054033707083743, 0.98151959434040603, 0.98142960986840833, 0.98236908412995039, 0.98405874535326987, 0.98218645954640393, 0.98125359876027796, 0.98381164704474766, 0.98136813861361405], [0.98233023301584965, 0.98064851416459098, 0.98310739930807711, 0.98275992693733261, 0.98162469929022655, 0.98245472717986027, 0.98294399336158256, 0.98039223694191791, 0.98234678639485373, 0.98279492364985865], [0.98071596923824922, 0.98101577919428284, 0.98312849051934681, 0.98263010378556748, 0.98307530252020636, 0.98198219906456274, 0.98281355006586124, 0.98175137023295411, 0.98273664402360805, 0.98327650371070618], [0.98049018722765391, 0.98004645685921332, 0.9790802937819022, 0.98181579577861877, 0.98102310784704116, 0.98102995062014087, 0.98047098990162795, 0.98126032289459075, 0.98101844225244739, 0.98092808644708696], [0.97914522876284948, 0.98082158578790823, 0.97901315915579989, 0.97930636024192796, 0.97942440574410838, 0.97911605032112947, 0.97997327766132558, 0.97927681958279056, 0.98094288306982969, 0.9811705348073102], [0.97851962927011493, 0.98026112151525069, 0.97874980247187748, 0.97920172925988613, 0.97998819749932575, 0.97816279419609287, 0.97978542474022257, 0.97881983809355766, 0.9794982819739515, 0.97905127532640224], [0.97870290594141562, 0.97833344771318898, 0.97857413946316851, 0.9779923426543442, 0.97864853497511117, 0.97570192078716544, 0.97869589802966639, 0.97868496480078282, 0.97766064417602649, 0.97805112347846501], [0.97579897102752255, 0.97663990046972338, 0.97497846391076426, 0.97734618449231447, 0.97753585207864702, 0.97684525868545236, 0.97738497447218764, 0.97704748602416924, 0.97652981094275626, 0.97640650698354536], [0.97139282092183443, 0.97378287785565187, 0.97319739040823039, 0.97405654622410665, 0.97245379650919539, 0.97100212931306185, 0.9751917704587274, 0.97321388886214233, 0.97354011109505012, 0.97159164234985396], [0.96801072707240776, 0.96829834132676773, 0.96743193614557366, 0.96979112617883423, 0.97231838930697034, 0.96950638767605646, 0.96969846707772267, 0.96884614044885542, 0.97132668622547058, 0.97062842817121675], [0.95233249521014263, 0.95102699866195661, 0.96218245148170678, 0.90296045003909009, 0.95894206885700217, 0.96371338306350984, 0.96280527692116213, 0.96052068745403407, 0.89743756732658986, 0.96170244035756214], [0.82626322494349136, 0.75249301751214437, 0.87334244061026267, 0.80214414525122479, 0.85861789503841246, 0.64000139991140459, 0.83508672145834983, 0.80988350939221454, 
0.73011864423779438, 0.8796382480894519], [0.52117241565645311, 0.64268520204696955, 0.56374124452080465, 0.42852564127993314, 0.43728532542191717, 0.48414023905722137, 0.51996125750378308, 0.41299288061797368, 0.49373656329002297, 0.4897073586318399], [0.32741774929740824, 0.420213287162369, 0.435686438852952, 0.34869809798245854, 0.42320220523062912, 0.29088149747615893, 0.46939678742089097, 0.4897942669732141, 0.33658464184062548, 0.49720298313747274], [0.30889655015237838, 0.45233073107402999, 0.29694101649314042, 0.34202356093940889, 0.27246242053082953, 0.43836463079717619, 0.35262071551978302, 0.2950164596439343, 0.46566221354838588, 0.35225579133249207], [0.39726931970189905, 0.2941512869653673, 0.24289147967670127, 0.42302434548549495, 0.45582435838488983, 0.31656179953862373, 0.36280725910290029, 0.33278218431431439, 0.27818828566325021, 0.4716814206540299], [1.2438453189076574e-16, 0.46293256569061891, 0.42322081204890288, 0.30484014499444811, 0.41147453903910358, 0.43918932217592527, 0.38196879781489285, -1.0540860479575021e-15, 2.2883671484449312e-16, 0.43298534066997812]]}
#icm_average_performances = {'R^2': [0.9681294855377438, 0.9671287185331471, 0.9642967627957505, 0.964516917924581, 0.9648162886601105, 0.9616350961324421, 0.9598991323758884, 0.9585661726033182, 0.956354040106684, 0.9533820593254815, 0.9459473942547698, 0.9391835006905331, 0.8913926194670463, 0.4810111466706505, -1.9181810147890268, -5.58804626322599, -8.447616079974807, -17.830543051333343, -3.3989199781447352], 'MSE': [1.2926174404466115, 1.3157295410949961, 1.3593681676228893, 1.3956116902780416, 1.4146681475880842, 1.5006119300665204, 1.5708420637397853, 1.6478550533337404, 1.7136375982663103, 1.829517945869727, 2.0935438753235722, 2.3711414287020505, 4.2704180294330865, 20.33165755394516, 114.64356455681079, 258.83040057150231, 370.55638423269227, 735.80406734616531, 173.48982960348184], 'Rp': [0.98399777564998936, 0.98349046496120118, 0.9820088106363789, 0.98214034402441508, 0.9823125912355346, 0.9807163633610323, 0.97981903051349784, 0.97920380943466812, 0.97810459220193346, 0.9766513409087082, 0.97294229739978544, 0.96958566296298765, 0.94736238193727562, 0.80075892464447507, 0.49939481280269182, 0.40390779553741785, 0.35765740900315585, 0.35751817394874708, 0.28566115224338695]}
# Non-probabilistic NMF
np_all_performances = {'R^2': [[0.9008720358092512, 0.9601292556538269, 0.9612346327882337, 0.9632370353344099, 0.9633416652818967, 0.9622524390617689, 0.9603462698770026, 0.9671543638475553, 0.963321176365319, 0.9647502561162388], [0.9587423807980398, 0.9594551242359872, -3.4181530290314203e+31, 0.9667705271422598, 0.9594795927427342, 0.9555980739163884, 0.9641509195014247, 0.9600012125982704, 0.9607054683210157, 0.9661084282762575], [0.9382181233060924, 0.9555854839521907, 0.9368037902628257, 0.9582622859261749, 0.9620448532122527, -9.575051602187483e+42, 0.9471224531572379, 0.7562029493130721, 0.9548875991119727, 0.9555910437196921], [0.9496402766263484, 0.9392930203111193, 0.9442935216869608, 0.9237404390903083, 0.9252803184201542, 0.9523531935481676, 0.9307580665584911, -8.133649834554712e+20, 0.9388936675208388, 0.9048524050134544], [-1.1598779258816443e+27, 0.9372133879068926, -3.961690031106814e+20, 0.9440248592259005, 0.9394813432143667, 0.9271719222979232, 0.9071206814997375, 0.9298946916088988, 0.9401015477550159, 0.9236712109281022], [0.8383378704069995, 0.8782152271084063, 0.917929309782159, 0.9064879190269263, -1.7332687521184305e+39, 0.8912375905665959, 0.8842081930840828, 0.9140398760507505, 0.9003029739978541, 0.9050855834326664], [0.4291718721362058, 0.2408221511005295, 0.633924944674207, 0.6059517361949012, 0.4764804275809116, 0.5876339971706936, 0.30137189935767383, 0.4659353092966517, 0.49461273376976167, 0.6103288971363448], [-2.342700361645398, -0.8577504348916534, -1.9468133763345055, -0.34252122761398285, -0.5991572676542325, -2.0317122701006682, -0.8312242875276055, -0.108334186297536, -0.17997801861930718, -0.5320222201096987], [-0.11243196632724661, 0.03940719301589024, -0.13528418448295598, -0.042675187196357545, -0.2120072997424549, -0.29388458483317614, -0.44435489468160605, -0.3475109932763185, -0.5122262467188277, -0.2011759238351314]], 'MSE': [[4.0862243392120989, 1.4299724526688331, 1.5478315731222576, 1.4485566598730231, 1.3419174296956933, 1.5154235230084641, 1.4766552639482657, 1.3331249217264756, 1.4567687519365884, 1.366132427982282], [1.5620403080588923, 1.4860195677252179, 1.3493431600534702e+33, 1.4751202752786454, 1.5220438439697228, 1.5607297865294163, 1.580556659686865, 1.5575718640615679, 1.5531311163348624, 1.4407339893520088], [2.3582786615099707, 1.7864543793984091, 2.4581876144444004, 1.6801830604913042, 1.6522341731931889, 3.7924425129609333e+44, 2.2929758478949553, 9.7356857972305821, 1.6214648973461219, 1.6976979410909789], [1.9142355382829572, 2.2816879721009702, 2.1346299355636189, 3.0480041665231195, 2.8851446240861547, 1.8758131708232826, 2.6862617238018971, 3.2579614472979008e+22, 2.4249045742967557, 3.8554037589132593], [4.6664732336351262e+28, 2.4824394405414516, 1.6265363356071028e+22, 2.1730815060057629, 2.3421266313290734, 2.743063926102657, 3.6572585424434725, 2.7496852131846103, 2.4005124113313241, 2.8468050558976619], [6.3329922360184616, 4.7910446382609768, 3.1623679445952599, 3.7030770129419066, 6.7674716120028753e+40, 4.2640269018660009, 4.567875948754005, 3.2404421535746017, 3.8234178604209772, 3.74623027931688], [22.002711432613282, 30.064556119472648, 14.683106704960636, 15.508973138068894, 20.51025684726568, 16.187068250196642, 26.826087912654543, 21.201330005593618, 20.008006653752226, 15.132184360577902], [131.83114074936503, 73.02538576627569, 119.33165121728589, 53.356695570391032, 63.919770764393228, 118.94930276474862, 71.317449613104927, 43.423013478239774, 47.509622297765574, 60.297065158413986], 
[43.577479696712679, 37.858347282609508, 45.531407591938404, 41.668997730021523, 48.279141935932088, 50.56517035994198, 57.213708264108064, 52.686229820235006, 59.166699743979777, 47.645945746358706]], 'Rp': [[0.95037857405740567, 0.97988404582615307, 0.98053049839702078, 0.98153477649269993, 0.98152521993533759, 0.98104821713427215, 0.97999923277144063, 0.98355933957506247, 0.98149880299043935, 0.98223755187360473], [0.97920353349754341, 0.97961173074760421, 0.030200726212311672, 0.98332653963263617, 0.97974966075550363, 0.97790157562680458, 0.98199762785234446, 0.97992759439844479, 0.98032876744747344, 0.98300650347533247], [0.96920967282957282, 0.97758323107328726, 0.96804413788979149, 0.97922104249115349, 0.98084388878994777, 0.0034839916357522665, 0.97329363289935722, 0.89072044389126603, 0.97732163806555494, 0.97764917791994499], [0.97464651976991989, 0.96944953281324575, 0.97210318321723144, 0.96179891441702603, 0.96218400971305429, 0.97598836501352693, 0.96543049507150214, -0.0051467773616229607, 0.9695531230206752, 0.95432741700674417], [-0.013080893326172505, 0.9687709598282892, 0.0154222415085759, 0.97228143642665765, 0.9698418909324481, 0.96359737010165236, 0.95359032632328022, 0.96465991235344772, 0.96975971293391483, 0.96172047493248258], [0.92145297881044197, 0.93887770864955522, 0.95923092603473969, 0.95307064461122093, 0.0019191481769650135, 0.94475008508827107, 0.94116943735172731, 0.9569184836230441, 0.94975816616296227, 0.95213011428914041], [0.75281984174534367, 0.69344700368562529, 0.82433916270976315, 0.8149423647725671, 0.77192858325793101, 0.80172230931040822, 0.71370441334474222, 0.76595337257687091, 0.7795462531197147, 0.81482529264009951], [0.4022157265391581, 0.49987556610742007, 0.40335727391104315, 0.55323898257513349, 0.5197183311959761, 0.41229207287725705, 0.53897287199072397, 0.60251608432676695, 0.56678540039532477, 0.54895666941604127], [0.58262057427916281, 0.59721078387221338, 0.53555919427868337, 0.5712455094801655, 0.53586240999279489, 0.5234212547737318, 0.49312408378952566, 0.53725758782329591, 0.52895742581609639, 0.56688119771431333]]}
np_average_performances = {'R^2': [0.9566639130135502, -3.41815302903142e+30, -9.575051602187484e+41, -8.133649834554712e+19, -1.1598783220506474e+26, -1.7332687521184306e+38, 0.48462339684178807, -0.9772213650794589, -0.2262144088078184], 'MSE': [1.7002607343173981, 1.3493431600534702e+32, 3.7924425129609332e+43, 3.2579614472979009e+21, 4.6664748601714616e+27, 6.7674716120028753e+39, 20.212428142515609, 78.296109737998364, 48.41931281718378], 'Rp': [0.9782196259053435, 0.88552542596459993, 0.86973708574856268, 0.87003347826813027, 0.7726563432014576, 0.85192776927980685, 0.77332285971630665, 0.50479289793348447, 0.54721400218199834]}
#np_all_performances = {'R^2': [[0.9689982320056162, 0.9689917090673902, 0.9584654970391766, 0.9702397316702163, 0.9659699311572578, 0.9607874067781528, 0.9620344908386754, 0.9652920571129262, 0.9622916408094269, 0.9617588832149964], [0.9598437316888161, 0.9622924145658958, 0.9677960408561996, 0.9602434737470198, 0.9594643815302007, 0.9693661064070987, 0.9695486139574616, 0.9604711451375875, 0.9610284970693976, 0.9617925706594852], [0.9631133665266515, 0.9622463690704579, 0.9644905915126607, 0.9624600432303675, 0.9666850830046918, 0.9663524825759241, 0.9572154317059923, 0.9580084823000874, 0.962397059627732, 0.966867665543729], [0.9628790861715747, -3254066661770114.5, 0.957833080545854, 0.9575328931203445, 0.8900584102712475, 0.9583618928046787, 0.9657816626801784, 0.841580082114134, 0.9525619016639351, 0.9569640020126929], [0.9607880533317379, 0.9157439062264688, 0.9589573186083807, 0.9180226447384124, 0.9469749189838722, 0.9597140810809299, 0.9622271720890319, 0.958118547187024, 0.9540428525348618, 0.9566849003010336], [-2.55868148161001e+42, 0.957683182882155, 0.9587232855238976, -1.3415450235337558e+28, 0.9593870187594239, 0.9622407821599628, 0.9407569208274621, 0.9612911929277, 0.9593698469748104, 0.9371284041428767], [0.9567818821374593, 0.954149388490502, 0.9592523234860723, 0.9525245083026567, 0.9505813363404699, 0.9529680711662232, 0.9562292080514075, 0.9582795812576681, -6.771507556480724e+48, 0.9586656036159685], [0.9519234697608772, 0.9254403519065733, 0.9535435966511446, 0.953042814767579, 0.953842865985006, 0.9481449963045427, -3.342898208367701e+29, 0.9537344937713133, 0.9520765614961054, 0.9524343165954939], [0.9260161563363417, 0.949102125328357, 0.9516657231859105, 0.9424782338059539, 0.9528841462273592, 0.9458148637457714, -1.344325854907069e+49, 0.9342351118142681, 0.9421536825264263, 0.9366036279708401], [0.9456769430736538, 0.9206456954910301, 0.8912473296353778, 0.9145563360767159, -3.3421416171517766e+43, 0.9463731176571757, 0.9363812798211818, -5.044368521293507e+37, 0.9451235856937855, -4.815728647882289e+39], [0.9264118011874416, 0.9319537578255552, 0.9308555374523724, 0.7931815399864768, 0.8517528187732719, -2.3339054163897264e+47, 0.9381256448257252, 0.9360939404776755, 0.9184807048679889, 0.9355681331830422], [-1.7828311957942265e+42, 0.9155502861253152, -9.50361552573879e+26, -2.1487464433752587e+22, 0.911004420997275, 0.8970489762062017, 0.9085837816089444, 0.9199986520873049, 0.8635546374415994, 0.8955021057003668], [0.7889711656904008, 0.7035937739289586, -7.600237543299226e+39, 0.8905470493348479, 0.662818734883198, 0.5857342249504401, 0.7298748250346663, -4.8768599038663476e+35, 0.775201105727966, 0.8432735002742656], [-1.5681012657202642e+33, 0.5296116621493883, 0.16894790684150274, 0.6286771509001827, 0.5277955356265289, 0.32158413992182533, -0.20362791685214754, 0.05728122000535929, 0.5540660363757446, -1.176104872802147e+22], [0.16858042412274465, -1.1250762581170175, 0.23414151949605255, -0.47627590166173284, -0.2917214342152843, -0.019385740840076604, 0.34052104276537487, -0.5436105806994096, 0.3692490334513584, 0.1500210782625928], [-0.4750244959474528, -0.2100999024110033, -0.40185498231576644, -1.2996346501824472, -0.44286374758647673, -3.077743108044516e+31, -1.3457161039842909, -0.52598194624073, -0.40769432340512113, -0.26048882687732], [-0.36215033222912907, -0.9117409339503786, -0.8898736211118687, -0.5738977323331467, -0.45584815709294113, -0.5428881223555295, -1.9183092272410014, -0.6745141643604253, -0.8530675846021665, 
-0.5308027988549875], [-0.19864828000522228, -0.18469100420696805, -0.3084046383467123, -0.03134978563147128, -0.6247612355338237, -0.27912563505798005, -0.3630148129793844, -0.30046909769890773, -0.11319161374774467, -0.2547511374374025], [-0.5842406128103734, -1.1423230553785806, -1.0345608572604004, -0.8683489576868813, -0.4220735018942332, -0.5677464555522593, -0.7283767576125377, -0.10139318601236713, -0.7239328487602648, -0.5908808231418072]], 'MSE': [[1.1802712051106858, 1.4126262627064516, 1.3923692667473091, 1.2449925479234853, 1.2877932481021859, 1.4236867126039514, 1.4018130667156958, 1.4013138866247794, 1.6455582642658668, 1.4826511458047276], [1.5410649856343073, 1.303102060087437, 1.4358020607608071, 1.5793946511293586, 1.403074633596856, 1.2683215421358502, 1.3140945090364153, 1.4925795904278212, 1.3782972813548691, 1.4216230597083566], [1.4284095121822744, 1.4649654547359812, 1.459510390227442, 1.4728224512956694, 1.4682270263910033, 1.4029980295146676, 1.6662092738702408, 1.5450993059621192, 1.4619019006284073, 1.4607329431942684], [1.4527887731270694, 1.28471194978576e+17, 1.5978594131366182, 1.7060529164773934, 4.1405428510325377, 1.6963038832046735, 1.4056426211887652, 6.2175898134952945, 1.7686046714639767, 1.5933653897347384], [1.576472439252127, 3.4942121712835421, 1.5713451544780426, 3.1574823012504871, 2.1164399179301929, 1.5463139412545714, 1.5577352386966241, 1.5563986668325174, 1.6941649916290349, 1.6474415026221223], [9.7435369133537068e+43, 1.683463803447468, 1.6380058016525623, 5.1438466653810227e+29, 1.6817967692472262, 1.5358185902719765, 2.4774127086023636, 1.6287533435786079, 1.6231301199604844, 2.5573851372265568], [1.7196523514646631, 1.819155204427821, 1.6100147728666587, 1.8020918542697395, 1.867270585868499, 1.8102501713325201, 1.7452458803631219, 1.6829507289594419, 2.6377407036408025e+50, 1.6867755713199684], [1.8844705158092441, 2.9603043927499164, 1.9219347391611974, 1.8394204285223834, 1.8707331840186461, 2.0826265642915986, 1.3542401945513279e+31, 1.7861342506410813, 1.9042447585546483, 1.8613669573398948], [2.9578069332408026, 2.0758341265594167, 1.953742080364389, 2.2008448542602528, 1.8447826468176223, 2.282414574568568, 5.2259339601978801e+50, 2.653528295921848, 2.208609769660197, 2.4809563654362146], [2.09369073740059, 3.1705299172627837, 4.1222275259875296, 3.4698419507856411, 1.2819563046400907e+45, 2.1093322600681481, 2.4841845856791802, 2.0051503600031294e+39, 2.1252389680422934, 1.9232132124808484e+41], [2.7541215904009415, 2.6627352501698751, 2.7533384462302251, 7.9439920760319467, 5.7784899529506815, 9.3102307935582702e+48, 2.4374485882397976, 2.4840103110403429, 3.0728073191245775, 2.5707657440905067], [7.1120552354010848e+43, 3.3590401226907005, 3.7470380492005788e+28, 8.6430369800031231e+23, 3.6664148100338192, 3.9355205663497221, 3.5080913079881522, 3.0632051911468978, 5.2556160776850316, 3.9532166534727646], [8.2140023605177817, 11.837555765030311, 3.0474640261512159e+41, 4.4274353069611152, 12.952825545632704, 16.119197497591752, 10.677342801192514, 1.9211169014058091e+37, 9.014872677123801, 6.0682969472140016], [5.964336501347473e+34, 17.932575767468986, 33.072599261530634, 14.768252316927763, 18.143817135325932, 26.339020648085057, 46.634705256237503, 38.106739564081963, 18.255055412834867, 4.7151296494996335e+23], [33.041437207861975, 84.851950719380639, 30.237197815698917, 58.2445851581829, 50.60851964565547, 40.141668672252891, 26.015523071919421, 57.920090994787209, 24.360551446402727, 32.976211260607172], 
[57.984790534559451, 47.81508501354574, 55.812434212385206, 90.784343664348924, 56.204466810719033, 1.1792894866306272e+33, 92.97829994380146, 58.639036572691147, 56.094034210991161, 50.344217931643016], [54.021377664968909, 76.908903438334164, 73.425636123523844, 62.461391725976362, 57.152733372469157, 60.925560160560117, 112.88417970554843, 66.30652951883981, 72.547888753439054, 59.053812135843401], [47.743480120657715, 45.367252816441152, 51.637637952619883, 40.582623491159282, 63.957366560854311, 49.680642861708606, 53.012992159273139, 51.558369991964696, 43.750666071340795, 48.577461814030329], [62.202041491149032, 83.667810862065295, 79.718821802198292, 73.166805592742421, 55.835740352014, 61.774983793541601, 67.25576053930709, 43.136059224316718, 67.706943836689973, 61.712698069700565]], 'Rp': [[0.98465775510093068, 0.98440237618724624, 0.97911925346370121, 0.98500811514719377, 0.98284718166105978, 0.98024738746185613, 0.98085155260956436, 0.98261066414885456, 0.98100703694691394, 0.98084847546332932], [0.97992939133473445, 0.9810142404849177, 0.98380841637764627, 0.9800009522768145, 0.98004578555265687, 0.98461051891877605, 0.98477444184598584, 0.98011748031275214, 0.98032561387379369, 0.98085702601386004], [0.98150846134487124, 0.98099890724685557, 0.98218683627643355, 0.98105262308582208, 0.98321815405911472, 0.98303681551128708, 0.97862875073385225, 0.97904659353777579, 0.98103029929312602, 0.98332531308699755], [0.98129726904739023, -0.0049120308121148029, 0.97876236880065337, 0.97857619824877251, 0.94732055878368593, 0.97904142688124018, 0.9827496093435294, 0.92675001100298804, 0.97622665646572149, 0.97853942220224266], [0.98023268133408836, 0.9579508250243427, 0.97927020576541424, 0.95883908095730441, 0.97326380790488265, 0.9797609489423712, 0.98107332560865268, 0.97884996873242847, 0.97694766613776329, 0.97811126095017831], [0.0031883950180226438, 0.97875561242318154, 0.97925460284105881, -0.021651372145925451, 0.97954272813842358, 0.98110019637312573, 0.97047347155771557, 0.98064479721580777, 0.97956816871020591, 0.96850329877907126], [0.97822373215404068, 0.97682285049970052, 0.97951837490220339, 0.97606424931018954, 0.97504448306808944, 0.9763012830377914, 0.97798504657982199, 0.9789721267516549, -0.015942100929582023, 0.97930038985069301], [0.97578459272558671, 0.96310342422866069, 0.97668804927476449, 0.97648951896078928, 0.97668910848454293, 0.97400933626136343, -0.0099835882110452332, 0.97665633357941384, 0.97592100674377535, 0.97632796424738222], [0.96248591519084981, 0.97429199568920388, 0.975619638094533, 0.97105122683329004, 0.97643416259453553, 0.97275194575278223, 0.0068388438226997536, 0.96715033607594758, 0.97085300714616962, 0.96869581540056193], [0.9726581614564479, 0.96024580685902727, 0.94552292290344098, 0.95744629144892557, 0.05636481653700439, 0.97304973196075928, 0.96776803069727158, -0.0035101415067815334, 0.97228155657838933, -0.0082754766372177511], [0.96345311566912339, 0.96580117183901959, 0.96524088907976713, 0.9034839564205005, 0.92730464612073871, -0.023267118495971514, 0.96896843692824342, 0.9680254000183206, 0.95910374328898396, 0.96753733966915456], [0.013043336926545336, 0.9573913587040106, -0.0069263580139984557, -0.015273496727157509, 0.95546030682699845, 0.9489348384191103, 0.95455028920798179, 0.95962387952357642, 0.9330029205523579, 0.94718585201272731], [0.89522762892607743, 0.85961581352269234, -0.014575615087317129, 0.94664230880954781, 0.84672909252064454, 0.81023239697196481, 0.86866420042656589, 0.021618411266052295, 
0.89234277011765917, 0.9226811614599304], [-0.01950436567811074, 0.79462273955956053, 0.70833982061806411, 0.82583768673492608, 0.80482376487336071, 0.73299716323250819, 0.65163891259137841, 0.69291327516217016, 0.78499965087236401, 0.018292899645978456], [0.6886338084788225, 0.4981679599944373, 0.6836182596782896, 0.54902644488388352, 0.61188235993581541, 0.64225137181813985, 0.72363243403613964, 0.57380804566480847, 0.71294693512561713, 0.66948246534889233], [0.56095625960120499, 0.5655642223132914, 0.55562650639979705, 0.47791063850242815, 0.55448917382386187, -0.0063349302370715619, 0.51207695582492607, 0.5438505905263572, 0.5554895845144926, 0.5771852678863193], [0.55012582935098686, 0.47731388752097337, 0.51568035139701274, 0.53812739530622056, 0.56797193224881048, 0.52313899777476558, 0.42047688197233918, 0.51325645969317957, 0.48614450370630469, 0.52245265693858189], [0.56725183001055868, 0.57888029300813937, 0.52402483719445025, 0.57606980010301911, 0.53782341244400145, 0.54145419923433535, 0.55378844797235449, 0.53446600620951068, 0.57519922544107238, 0.57104861985921385], [0.51628999325013769, 0.43627902867432566, 0.46270143561543892, 0.45884566085590112, 0.51791106593512037, 0.43880781799457313, 0.50282493075512535, 0.51824988530748706, 0.4518772485859705, 0.4471729511972467]]}
#np_average_performances = {'R^2': [0.9644829579693834, 0.9631846975619162, 0.9629836575098294, -325406666177010.56, 0.9491274395081752, -2.5586814816100233e+41, -6.7715075564807235e+47, -3.3428982083677006e+28, -1.3443258549070688e+48, -3.342628234385086e+42, -2.3339054163897263e+46, -1.7828311957942274e+41, -7.600725229289613e+38, -1.568101265732025e+32, -0.11935568174353975, -3.077743108044516e+30, -0.7713092674131573, -0.26584072406456166, -0.6763877056109706], 'MSE': [1.3873075606605139, 1.4137354373872077, 1.4830876288002073, 12847119497857600.0, 1.9918006325229261, 9.7435369133537581e+42, 2.6377407036408024e+49, 1.3542401945513278e+30, 5.2259339601978799e+49, 1.2821506311116989e+44, 9.3102307935582698e+47, 7.1120552354010885e+42, 3.0476561378413562e+40, 5.9643365013946247e+33, 43.83977359927492, 1.1792894866306272e+32, 69.568801259950334, 49.586849384004992, 65.617766556372501], 'Rp': [0.982159979819065, 0.98154838669919386, 0.98140327541761363, 0.87243514899641084, 0.97442997713574275, 0.77993798989106877, 0.87822904352246023, 0.87616857462952336, 0.87461728866005739, 0.67935517002972667, 0.85656515805378797, 0.66469929274321526, 0.70491781689338173, 0.59949615476121998, 0.63534500849648456, 0.48968142691556071, 0.51146888959091741, 0.55600066714766549, 0.47509600181713257]}
# Assemble the average performances and method names
methods = ['VB-NMF', 'G-NMF', 'ICM-NMF', 'NP-NMF']
avr_performances = [
    vb_average_performances,
    gibbs_average_performances,
    icm_average_performances,
    np_average_performances
]
colours = ['r','b','g','c']
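# Note: `metrics`, `fractions_unknown`, `MSE_max` and `vb_average_performances`
# are expected to be defined earlier in this script (the VB-NMF results and the
# shared plot settings); they are not redefined in this section.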
for metric in metrics:
    fig = plt.figure(figsize=(1.9,1.5))
    fig.subplots_adjust(left=0.14, right=0.95, bottom=0.17, top=0.95)
    #plt.title("Performances (%s) for different fractions of missing values" % metric)
    plt.xlabel("Fraction missing", fontsize=8, labelpad=1)
    plt.ylabel(metric, fontsize=8, labelpad=-1)
    if metric == 'MSE':
        plt.yticks(range(0,MSE_max+1,2),fontsize=6)
    else:
        plt.yticks(fontsize=6)
    plt.xticks(fontsize=6)

    x = fractions_unknown
    for method,avr_performance,colour in zip(methods,avr_performances,colours):
        y = avr_performance[metric]
        #plt.plot(x,y,label=method)
        plt.plot(x,y,linestyle='-', marker='o', label=method, c=colour, markersize=3)

    plt.xlim(0.0,1.)
    if metric == 'MSE':
        plt.ylim(0,MSE_max)
    else:
        plt.ylim(0.5,1.05)

    plt.savefig("../graphs_toy/mse_nmf_missing_values_predictions.png", dpi=600)
#plt.savefig("/home/tab43/Dropbox/Posters/Poster NIPS AABI 2016 v2 png/images/mse_nmf_missing_values_predictions.png", dpi=1200) | apache-2.0 |
polarise/BioClasses | test_frameshift_signals.py | 2 | 1509 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import division
import sys
from Sequence import *
from TransitionMatrix import *
import numpy
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from Bio import SeqIO
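# Overview: walk a FASTA file, score each sequence for frameshift signals
# using a pre-computed transition matrix (via the Sequence/TransitionMatrix
# classes), and plot the differential graded likelihood for sequences whose
# most likely frameshift path has length 2 or 3. Sequences beyond the first
# ~1000 and trees with more than 1000 leaves are skipped to keep runs manageable.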
def main( fn ):
    TM = TransitionMatrix()
    TM.read( "euplotid_transition_matrix.pic" )

    pdf = PdfPages( "likelihood_profiles_test.pdf" )

    b = 0 # count the ones that pass
    c = 0 # count all
    for seq_record in SeqIO.parse( fn, "fasta" ):
        if c > 1000:
            break
        sequence = str( seq_record.seq )
        seq_name = seq_record.id
        s = Sequence( sequence=sequence, name=seq_name )
        s.truncate( effect_truncation=True, verbose=False )
        no_of_leaves = s.count_leaves()
        if no_of_leaves > 1000:
            print >> sys.stderr, "Complex tree with %s leaves...omitting." % no_of_leaves
            continue
        s.set_transition_matrix( TM )
        s.build_tree()
        s.get_frameshift_signals()
        s.estimate_likelihood()
        s.estimate_frameshift_likelihood()
        s.get_most_likely_frameshift()
        if s.most_likely_frameshift is not None:
            if 1 < len( s.most_likely_frameshift.path ) < 4:
                #s.plot_differential_graded_likelihood( outfile=pdf, show_path_str=True )
                s.plot_differential_graded_likelihood()
                b += 1
        c += 1
    pdf.close()

    print >> sys.stderr, "Processed %d (of %d) sequences [%.2f%%]." % ( b, c, b/c*100 )
if __name__ == "__main__":
    try:
        fn = sys.argv[1]
    except IndexError:
        print >> sys.stderr, "./script.py <fasta-file>"
        sys.exit( 1 )
    main( fn )
| gpl-2.0 |
JamesLuoau/deep-learning-getting-started | b_gradient.py | 1 | 3563 | import numpy as np
def data_prep():
import pandas as pd
admissions = pd.read_csv('data/gradient_binary.csv')
# Make dummy variables for rank
data = pd.concat([admissions, pd.get_dummies(admissions['rank'], prefix='rank')], axis=1)
data = data.drop('rank', axis=1)
# Standardize features
for field in ['gre', 'gpa']:
mean, std = data[field].mean(), data[field].std()
data.loc[:, field] = (data[field] - mean) / std
# Split off random 10% of the data for testing
np.random.seed(42)
sample = np.random.choice(data.index, size=int(len(data) * 0.9), replace=False)
data, test_data = data.loc[sample], data.drop(sample)
# Split into features and targets
features, targets = data.drop('admit', axis=1), data['admit']
features_test, targets_test = test_data.drop('admit', axis=1), test_data['admit']
return features, targets, features_test, targets_test
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1 / (1 + np.exp(-x))
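# A minimal sketch of the sigmoid derivative alluded to in the comments inside
# gradient_descent() below; the helper name is an assumption (the training loop
# deliberately avoids a separate function and reuses the cached sigmoid output).
def sigmoid_prime(x):
    """
    Derivative of the sigmoid: f'(x) = f(x) * (1 - f(x)).
    When the sigmoid output is already available, the same quantity is simply
    output * (1 - output), which is what the training loop computes inline.
    """
    fx = sigmoid(x)
    return fx * (1 - fx)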
def gradient_descent():
features, targets, features_test, targets_test = data_prep()
# TODO: We haven't provided the sigmoid_prime function like we did in
# the previous lesson to encourage you to come up with a more
# efficient solution. If you need a hint, check out the comments
# in solution.py from the previous lecture.
# Use to same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1 / n_features ** .5, size=n_features)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Activation of the output unit
# Notice we multiply the inputs and the weights here
# rather than storing h as a separate variable
output = sigmoid(np.dot(x, weights))
# The error, the target minus the network output
error = y - output
# The error term
# Notice we calculate f'(h) here instead of defining a separate
# sigmoid_prime function. This just makes it faster because we
# can re-use the result of the sigmoid function stored in
# the output variable
error_term = error * output * (1 - output)
# The gradient descent step, the error times the gradient times the inputs
del_w += error_term * x
# Update the weights here. The learning rate times the
# change in weights, divided by the number of records to average
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
# Calculate accuracy on test data
tes_out = sigmoid(np.dot(features_test, weights))
predictions = tes_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
if __name__ == "__main__":
gradient_descent()
| apache-2.0 |
mtoce/Modeling-Analysis | Dynamical_Chaos.py | 1 | 3770 | from math import sin, cos
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import newton
from scipy.misc import imresize
import matplotlib.pyplot as pl
import pylab as plb
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
import matplotlib.cm as cm
import cv2
import Image
from numpy import *
from scipy.misc import lena,imsave
import math
from PIL import Image
# Logistic Map function
def Logistic_Map():
# Initial X value
X = 0.1
# Initialize arrays
XA = []
FA = []
GI = []
GI2 = []
IA = []
# Rate of change
R = 3
# Iteration count total
I = 500
# Plot X array against itself (1)
'''pl.figure("Geometric Iteration", figsize = (8, 8))
pl.title("Geometric Iteration")
pl.plot(X, 0, color = "Red", label = "Geometric Iteration")'''
# Calculate and store X value of each iteration
for i in range (I):
XA.append(X)
X = R * X * (1 - X)
FA.append(X)
IA.append(i)
GI.append(XA[i])
GI.append(XA[i])
GI2.append(XA[i])
GI2.append(FA[i])
# Geometric Iteration broken - needs to be fixed
# Plot X array against itself (2)
'''pl.plot(GI, GI2, color = "Red")
pl.plot(XA, XA, color = "Green", label = "f(x) = x")
pl.plot(XA, FA, ",", color = "Red", label = "Logistic Map")
pl.xlabel("x value")
pl.ylabel("f(x) value")
pl.legend()'''
# Plot X array against corresponding iteration
pl.figure("Chaos of Feigenbaum Series", figsize = (8, 8))
pl.title("Chaos of Feigenbaum Series")
pl.plot(IA, XA, color='Red', label = "$\mu = 3$")
pl.xlabel("Iterations")
pl.ylabel("X-value")
pl.legend()
pl.show()
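# Both Logistic_Map above and Logistic_Bifurcation below iterate the same
# recurrence x_{n+1} = R * x_n * (1 - x_n); the standalone helper here is a
# minimal illustrative sketch (it is not called by the original routines).
def logistic_step(x, r):
    """Single logistic-map update: x -> r * x * (1 - x)."""
    return r * x * (1 - x)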
# Logistic bifurcation
def Logistic_Bifurcation():
# Iteration count total
I = 4000
# Truncator
T = 1000
# Rate of change
R = np.linspace(1, 4, I)
# X value
X = np.ndarray((I,I))
X[0].fill(0.1)
# Calculate and store X value of each iteration for a given R
for i in range (I - 1):
X[i + 1] = R * X[i] * (1 - X[i])
# Reform arrays
X = (X[(I-T):I]).reshape((1,T*I)).T
R = np.tile(R, T)
# Plot bifurcation
pl.figure("Logistic Bifurcation", figsize = (8, 8))
pl.title("Feigenbaum's Map")
pl.plot(R, X, ",", color = "Blue")
pl.xlabel('$\mu$')
pl.ylabel("x value")
# Arnold's Cat Map function
def Arnold_Cat_Map():
# load image
im = array(Image.open("Geass.png"))
print(im)
N = im.shape[0]
print(N)
# create x and y components of Arnold's cat mapping
x,y = meshgrid(range(N),range(N))
xmap = (2*x+y) % N
ymap = (x+y) % N
for i in xrange(N):
imsave("GeassMap/geass_{0}.png".format(i),im)
im = im[xmap,ymap]
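# The index maps above implement the standard Arnold cat map; in matrix form
#     [x']   [2 1] [x]
#     [y'] = [1 1] [y]   (mod N),
# i.e. xmap = (2*x + y) % N and ymap = (x + y) % N applied to the pixel grid.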
def Taylor_Greene_Chirikov(K):
# Iteration count total
I = 500
# Angle
A = np.ndarray((I, I))
A[0].fill(np.pi)
# Angular momentum and array
M = np.ndarray((I, I))
M[0] = np.linspace(0, 2 * np.pi, 500)
# Calculate system change over time
for i in range (I - 1):
M[i + 1] = (M[i] + K * np.sin(A[i]))%(2 * np.pi)
A[i + 1] = (A[i] + M[i + 1])%(2 * np.pi)
# Plot standard map
colors = iter(cm.rainbow(np.linspace(0, 2 * np.pi, len(A))))
pl.figure(K * 100, figsize = (8, 8))
pl.title("Taylor-Greene-Chirikov Phase Diagram for K = %.2f" % K)
pl.plot(A, M, ",")
pl.xlabel("Angle (Radians)")
pl.ylabel("Angular momentum")
pl.xlim([0, 2 * np.pi])
pl.ylim([0, 2 * np.pi])
#pl.show()
pl.savefig("TGC/%03d.png" %(K*100))
# Ask which function to run
print "Logistic map --> Enter 1"
print "Logistic bifurcation --> Enter 2"
print "Arnold's Cat Map --> Enter 3"
print "Taylor-Greene-Chirikov --> Enter 4"
Choice = input()
# Run corresponding function
if Choice == 1: Logistic_Map()
if Choice == 2: Logistic_Bifurcation()
if Choice == 3: Arnold_Cat_Map()
if Choice == 4:
# Kick parameter
K = 0
while (K <= 1):
Taylor_Greene_Chirikov(K)
K = K + 0.05
# Display Plots
if Choice != 4: pl.show()
| gpl-3.0 |
stablum/reimplementations | nade.py | 1 | 9810 | #!/usr/bin/env python3
import theano
from theano import tensor as T
import pandas as pd
from tqdm import trange,tqdm
import numpy as np
from sklearn.preprocessing import normalize
import sklearn.svm
import time
import mnist # pip3 install python-mnist
import os
import sys
import lasagne
import math
sys.setrecursionlimit(20000)
optimizer = "gpu"
if optimizer == "debug":
theano_mode = 'DebugMode'
theano.config.exception_verbosity="high"
theano.config.optimizer='None'
theano.config.floatX='float32'
elif optimizer == "gpu":
theano.config.optimizer='fast_run'
theano.config.openmp=True
theano.config.openmp_elemwise_minsize=4
#theano.config.device='gpu'
theano.config.floatX='float32'
theano.config.assert_no_cpu_op='raise'
theano.config.allow_gc=False
theano.config.nvcc.fastmath=True
assert theano.config.device=='gpu',theano.config.device
elif optimizer == "fast_cpu":
theano.config.optimizer='fast_run'
theano.config.floatX='float32'
lr=0.0001#0.02
n_epochs = 10000
z_dim = None
x_dim = None
repeat_training=1
log = None
g = T.nnet.sigmoid
data_amplify = 1
data_offset = 0
minibatch_size = 64
regularizer_lambda=0.01
rs = np.random.RandomState(1234)
rng = theano.tensor.shared_randomstreams.RandomStreams(rs.randint(999999))
class Logger():
def __init__(self,basename=""):
self.filename = basename+"_"+str(time.time())+".log"
self.f = open(self.filename,'w')
def __call__(self, *args):
print(*args, flush=True)
print(*args,file=self.f, flush=True)
def make_nade(D,z_dim):
log("make_nade with D={},z_dim={},g={}".format(D,z_dim,g))
x = T.fmatrix('x')
c_vals = np.random.normal(0,1,size=(1,z_dim)).astype('float32')
c = theano.shared(c_vals,name="c")
p_x = 1
def a_adder(W_col_T,x_i,acc):
W_col_T.name = "W_col_T"
prod = W_col_T * T.sum(x_i)
prod.name = "prod"
ret_T = acc.T + prod
return ret_T.T
"""
for i in range(D):
W_col_vals = np.random.normal(0,1,size=(z_dim,1)).astype('float32')
W_col = theano.shared(W_col_vals,name="W_col_%d"%(i+1))
W_cols.append(W_col)
"""
W_vals = np.random.normal(0,1,size=(z_dim,D)).astype('float32')
W = theano.shared(W_vals,name="W")
a_s_W,_u = theano.scan(
fn=a_adder,
outputs_info=c[0,:],
sequences = [ W.T,
x
]
)
a_s_excess = T.concatenate([c,a_s_W],axis=0)
a_s = a_s_excess[:D,:]
V_vals = np.random.normal(0,1,size=(D,z_dim)).astype('float32')
V = theano.shared(V_vals,name="V")
hs = g(a_s)
b_val = np.random.normal(0,1,size=(D,1)).astype('float32')
b = theano.shared(b_val,name="b")
def scan_p_x_cond(V_row,hi,b_i):
p_x_cond = g(T.dot(V_row,hi) + b_i)
return p_x_cond
p_x_cond,_u = theano.map(
fn=scan_p_x_cond,
sequences=[
V,
hs,
b
]
)
def scan_p_x_cond_obs(x_i,p):
ret = x_i * p + (1-x_i) * (1-p)
return ret
p_x_cond_obs,_u = theano.map(
fn=scan_p_x_cond_obs,
sequences=[
x,
p_x_cond
]
)
nll = - T.sum(T.log(p_x_cond_obs))
p_x = T.prod(p_x_cond_obs)
return (W,c,V,b),x,hs,p_x,nll,p_x_cond
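# In equations, make_nade implements the NADE factorisation
#     p(x) = prod_d p(x_d | x_{<d}),
#     h_d = g(c + sum_{k<d} W[:, k] * x_k),
#     p(x_d = 1 | x_{<d}) = g(V[d, :] . h_d + b_d),
# where the first scan accumulates the hidden pre-activations column by column
# (prepending c and truncating shifts them so h_d only sees x_{<d}) and the
# second map applies the per-dimension output layer.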
def make_xcond(z_dummy,W):
global g
dot = T.dot(W,z_dummy)
dot.name = "dot_generated"
ret = g(dot)
ret.name = "xcond"
return ret
def make_xsample(xcond):
global x_dim
global rng
ret = rng.binomial(n=1, p=xcond, size=(x_dim+1,1)) # +1 because bias/dummy
return ret
def shuffle(X,Y):
sel = np.arange(X.shape[1])
np.random.shuffle(sel)
X = X[:,sel]
Y = Y[:,sel]
return X,Y
def fix_data(features,labels):
# please notice the transpose '.T' operator
# in a neural network, the datapoints needs to be scattered across the columns
# because dot product.
X = (np.array(features).T.astype('float32')/255.)*data_amplify + data_offset
Y = np.expand_dims(np.array(labels).astype('float32'),1).T
return X,Y
def load_data():
log("setting up mnist loader..")
_mnist = mnist.MNIST(path='./python-mnist/data')
log("loading training data..")
X_train,Y_train = fix_data(*_mnist.load_training())
log("X_train.shape=",X_train.shape,"Y_train.shape=",Y_train.shape)
log("loading testing data..")
X_test,Y_test = fix_data(*_mnist.load_testing())
log("X_test.shape=",X_test.shape,"Y_test.shape=",Y_test.shape)
return X_train[:,:], Y_train, X_test[:,:], Y_test
def step(xs, params, params_update_fn):
nll = 0
for i in range(xs.shape[1]):
orig_x = xs[:,[i]]
curr_nll,curr_p_x = params_update_fn(orig_x)
nll += curr_nll
return nll
def partition(a):
assert type(a) is np.ndarray
assert a.shape[1] > minibatch_size, "a.shape[1] should be larger than the minibatch size. a.shape=%s"%str(a.shape)
minibatches_num = int(a.shape[1] / minibatch_size)
assert minibatches_num > 0
off = lambda i : i * minibatch_size
return [
a[:,off(i):off(i+1)]
for i
in range(minibatches_num)
]
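# For instance, with minibatch_size = 64 an input of shape (784, 200) yields
# 3 column blocks of shape (784, 64); the trailing 8 columns are discarded.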
def train(X, params, params_update_fn, repeat=1):
_sum = 0
for xs in tqdm(partition(X)*repeat,desc="training"):
# pseudo-contrastive err += step(xs, params, params_update_fn, zcond_fn, xcond_fn, params_contr_update_fn)
step_nll = step(xs, params, params_update_fn)
average_nll = step_nll / xs.shape[1]
log("step average nll:{}".format(average_nll))
_sum += step_nll
ret = _sum / X.shape[1]
return ret
def test_classifier(Z,Y):
#classifier = sklearn.svm.SVC()
log("training classifier..")
classifier = sklearn.svm.SVC(
kernel='rbf',
max_iter=1000
)
# please notice the transpose '.T' operator: sklearn wants one datapoint per row
classifier.fit(Z.T,Y[0,:])
log("done. Scoring..")
svc_score = classifier.score(Z.T,Y[0,:])
log("SVC score: %s"%svc_score)
def test_nll(X_test,test_nll_fn,repeat=1):
_sum = 0.
for xs in tqdm(partition(X_test)*repeat,desc="testing"):
for i in range(xs.shape[1]):
x = xs[:,[i]]
nll, p_x = test_nll_fn(x)
_sum += nll
ret = _sum/X_test.shape[1]
return ret
def noise_nll(test_nll_fn):
global x_dim
_sum = 0.
amount = 1000
for i in tqdm(range(amount),desc="noise"):
x = np.random.binomial(1,0.5,size=(x_dim,1)).astype('float32')
nll, p_x = test_nll_fn(x)
_sum += nll
ret = _sum / amount
return ret
def draw_samples(epoch,xcond_fn):
log("generating a bunch of random samples")
samples = []
for i in range(10):
_z = np.random.normal(np.array([[0]*z_dim]),(i+1.)/2.).astype('float32')
sample = xcond_fn(_z)
samples.append(sample)
samples_np = np.stack(samples,axis=2)
filename = "random_samples_epoch_%d.npy"%(epoch)
np.save(filename, samples_np)
log("done generating random samples.")
def main():
global log
global z_dim
global x_dim
global minibatch_size
global g
assert len(sys.argv) > 1, "usage: %s z_dim"%(sys.argv[0])
z_dim = int(sys.argv[1])
random_int = np.random.randint(0,1000000)
harvest_dir = "nade_harvest_zdim_{}_{}".format(
z_dim,
random_int
)
np.set_printoptions(precision=4, suppress=True)
try:
os.mkdir(harvest_dir)
except OSError as e: # directory already exists. It's ok.
print(e)
log = Logger("{}/nadelog".format(harvest_dir)) # notice: before chdir to harvest_dir
for curr in [sys.argv[0],"config.py","nade_job.sh","engage.sh"]:
os.system("cp %s %s -vf"%(curr,harvest_dir+"/"))
X,Y,X_test,Y_test = load_data()
os.chdir(harvest_dir)
log("sys.argv",sys.argv)
x_dim = X.shape[0]
num_datapoints = X.shape[1]
# set up
params,x,hs,p_x, nll, p_x_cond= make_nade(x_dim,z_dim)
log("made nade")
(W,c,V,b) = params
grads = []
for param in tqdm(params):
log("gradient of param "+param.name)
grad = T.grad(nll,param)
grad.name = "grad_"#+param.name
grads.append(grad)
params_updates = lasagne.updates.adam(grads,params,learning_rate=lr)
# pseudo-contrastive params_update_fn = theano.function([x,z],[], updates=params_updates)
params_update_fn = theano.function([x],[nll,p_x], updates=params_updates)
params_update_fn.name="params_update_fn"
test_nll_fn = theano.function([x],[nll,p_x])
gen_fn = theano.function([hs],p_x_cond)
def generate_and_save(epoch):
hs_random = np.random.uniform(0,1,(x_dim,z_dim)).astype('float32')
samples = gen_fn(hs_random)
filename = "nade_samples_epoch_{0:04d}.npy".format(epoch)
np.save(filename,samples)
def log_shared(qs):
if type(qs) not in (list,tuple):
qs = [qs]
for q in qs:
log(q.name+": mean:{}, std:{}".format(
np.mean(q.eval()),
np.std(q.eval())
))
def summary():
log("epoch %d"%epoch)
log("harvest_dir",harvest_dir)
log("lr %f"%lr)
log_shared(W)
log("done. epochs loop..")
# train
for epoch in range(n_epochs):
X,Y = shuffle(X,Y)
summary()
generate_and_save(epoch)
nll_noise = noise_nll(test_nll_fn)
log("epoch average noise nll:", nll_noise)
nll_test = test_nll(X_test,test_nll_fn)
log("epoch average test nll:", nll_test)
nll = train(X,params,params_update_fn,repeat=repeat_training)
log("epoch average training nll:", nll)
log("epochs loop ended")
summary()
if __name__=="__main__":
main()
| gpl-3.0 |