repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
junbochen/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
"""
Plots a curve with tangent lines.
Parameters
----------
x : list
List of x coordinates.
Assumed to be sorted into ascending order, so that the tangent
lines occupy 80 percent of the horizontal space between each pair
of points.
y : list
List of y coordinates
s : list
List of slopes
"""
assert isinstance(x, list)
assert isinstance(y, list)
assert isinstance(s, list)
n = len(x)
assert len(y) == n
assert len(s) == n
if pyplot is None:
raise RuntimeError("Could not import pyplot, can't run this code.")
pyplot.plot(x, y, color='b')
if n == 0:
pyplot.show()
return
pyplot.hold(True)
# Add dummy entries so that the for loop can use the same code on every
# entry
if n == 1:
        x = [x[0] - 1.] + x + [x[0] + 1.]
else:
x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
y = [0.] + y + [0]
s = [0.] + s + [0]
for i in xrange(1, n + 1):
ld = 0.4 * (x[i] - x[i - 1])
lx = x[i] - ld
ly = y[i] - ld * s[i]
rd = 0.4 * (x[i + 1] - x[i])
rx = x[i] + rd
ry = y[i] + rd * s[i]
pyplot.plot([lx, rx], [ly, ry], color='g')
pyplot.show()
if __name__ == "__main__":
# Demo by plotting a quadratic function
import numpy as np
x = np.arange(-5., 5., .1)
y = 0.5 * (x ** 2)
x = list(x)
y = list(y)
tangent_plot(x, y, x)
| bsd-3-clause |
vybstat/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
Mazecreator/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
IssamLaradji/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
sdrdl/sdipylib | sdipylib/geo.py | 1 | 1843 | """Support functions for geographic operations"""
def aspect(df):
"""Return the aspect ratio of a Geopandas dataset"""
tb = df.total_bounds
return abs((tb[0] - tb[2]) / (tb[1] - tb[3]))
def scale(df, x):
"""Given an x dimension, return the x and y dimensions to maintain the dataframe aspect ratio"""
return (x, x / aspect(df))
def aspect_fig_size(df, width, subplots='111', **kwargs):
"""
Create a matplotlib figure and axis with a given X width and a height
to keep the boundary box aspect ratio.
:param df: Geopandas GeoDataFrame, from which to calculate the aspect ratio
:param width: X dimension, in inches, of the plot
:param subplots: A Matplotlib subplots string
:param kwargs: Other arguments for plt.figure
:return:
"""
import matplotlib.pylab as plt
fig = plt.figure(figsize = scale(df, width), **kwargs)
ax = fig.add_subplot(subplots)
return fig, ax
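# --- usage sketch: an added illustration, not part of the original module.
# The GeoDataFrame passed in is hypothetical; any geopandas GeoDataFrame would do.
def plot_to_scale(gdf, width=8, **kwargs):
    """Draw a GeoDataFrame on axes sized by aspect_fig_size so the plot keeps
    the bounding-box aspect ratio of the data."""
    fig, ax = aspect_fig_size(gdf, width, **kwargs)
    gdf.plot(ax=ax)
    return fig, ax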
def total_centroid(df):
return list(reversed(df.geometry.unary_union.centroid.coords[0]))
def folium_map(df, data_column, tiles='Stamen Toner', fill_color='RdYlGn', zoom_start=12, **kwargs):
import folium
mapa = folium.Map(location=total_centroid(df),
tiles=tiles, zoom_start=zoom_start)
if not df.crs:
df.crs = {'init' :'epsg:4326'}
#threshold_scale = np.linspace(_['non_min_r'].min(),
# _['non_min_r'].max(), 6, dtype=float).tolist()
choro_args = dict(
fill_color=fill_color,
fill_opacity=.6,
line_weight=.7
)
mapa.choropleth(geo_data=df.reset_index(),
data=df.reset_index(),
key_on='feature.properties.geoid',
columns=['geoid',data_column],
**choro_args
)
return mapa
| bsd-2-clause |
samueldotj/TeeRISC-Simulator | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
def __init__(self, default=None, **kwargs):
super(BarChart, self).__init__(default, **kwargs)
self.inputdata = None
self.chartdata = None
self.inputerr = None
self.charterr = None
def gen_colors(self, count):
cmap = matplotlib.cm.get_cmap(self.colormap)
if count == 1:
return cmap([ 0.5 ])
if count < 5:
return cmap(arange(5) / float(4))[:count]
return cmap(arange(count) / float(count - 1))
# The input data format does not match the data format that the
# graph function takes because it is intuitive. The conversion
# from input data format to chart data format depends on the
# dimensionality of the input data. Check here for the
# dimensionality and correctness of the input data
def set_data(self, data):
if data is None:
self.inputdata = None
self.chartdata = None
return
data = array(data)
dim = len(shape(data))
if dim not in (1, 2, 3):
raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
self.inputdata = data
# If the input data is a 1d matrix, then it describes a
# standard bar chart.
if dim == 1:
self.chartdata = array([[data]])
# If the input data is a 2d matrix, then it describes a bar
# chart with groups. The matrix being an array of groups of
# bars.
if dim == 2:
self.chartdata = transpose([data], axes=(2,0,1))
# If the input data is a 3d matrix, then it describes an array
# of groups of bars with each bar being an array of stacked
# values.
if dim == 3:
self.chartdata = transpose(data, axes=(1,2,0))
def get_data(self):
return self.inputdata
data = property(get_data, set_data)
def set_err(self, err):
if err is None:
self.inputerr = None
self.charterr = None
return
err = array(err)
dim = len(shape(err))
if dim not in (1, 2, 3):
raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
self.inputerr = err
if dim == 1:
self.charterr = array([[err]])
if dim == 2:
self.charterr = transpose([err], axes=(2,0,1))
if dim == 3:
self.charterr = transpose(err, axes=(1,2,0))
def get_err(self):
return self.inputerr
err = property(get_err, set_err)
# Graph the chart data.
# Input is a 3d matrix that describes a plot that has multiple
# groups, multiple bars in each group, and multiple values stacked
# in each bar. The underlying bar() function expects a sequence of
# bars in the same stack location and same group location, so the
# organization of the matrix is that the inner most sequence
# represents one of these bar groups, then those are grouped
# together to make one full stack of bars in each group, and then
# the outer most layer describes the groups. Here is an example
# data set and how it gets plotted as a result.
#
# e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
# [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
#
# will plot like this:
#
# 19 31 20 32 21 33
# 16 28 17 29 18 30
# 13 25 14 26 15 27
# 10 22 11 23 12 24
#
    # Because this arrangement is rather counterintuitive, the rearrange
    # function takes various matrices and arranges them to fit this
# profile.
#
# This code deals with one of the dimensions in the matrix being
# one wide.
#
def graph(self):
if self.chartdata is None:
raise AttributeError, "Data not set for bar chart!"
dim = len(shape(self.inputdata))
cshape = shape(self.chartdata)
if self.charterr is not None and shape(self.charterr) != cshape:
raise AttributeError, 'Dimensions of error and data do not match'
if dim == 1:
colors = self.gen_colors(cshape[2])
colors = [ [ colors ] * cshape[1] ] * cshape[0]
if dim == 2:
colors = self.gen_colors(cshape[0])
colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]
if dim == 3:
colors = self.gen_colors(cshape[1])
colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]
colors = array(colors)
self.figure = pylab.figure(figsize=self.chart_size)
outer_axes = None
inner_axes = None
if self.xsubticks is not None:
color = self.figure.get_facecolor()
self.metaaxes = self.figure.add_axes(self.figure_size,
axisbg=color, frameon=False)
for tick in self.metaaxes.xaxis.majorTicks:
tick.tick1On = False
tick.tick2On = False
self.metaaxes.set_yticklabels([])
self.metaaxes.set_yticks([])
size = [0] * 4
size[0] = self.figure_size[0]
size[1] = self.figure_size[1] + .12
size[2] = self.figure_size[2]
size[3] = self.figure_size[3] - .12
self.axes = self.figure.add_axes(size)
outer_axes = self.metaaxes
inner_axes = self.axes
else:
self.axes = self.figure.add_axes(self.figure_size)
outer_axes = self.axes
inner_axes = self.axes
bars_in_group = len(self.chartdata)
width = 1.0 / ( bars_in_group + 1)
center = width / 2
bars = []
for i,stackdata in enumerate(self.chartdata):
bottom = array([0.0] * len(stackdata[0]), Float)
stack = []
for j,bardata in enumerate(stackdata):
bardata = array(bardata)
ind = arange(len(bardata)) + i * width + center
yerr = None
if self.charterr is not None:
yerr = self.charterr[i][j]
bar = self.axes.bar(ind, bardata, width, bottom=bottom,
color=colors[i][j], yerr=yerr)
if self.xsubticks is not None:
self.metaaxes.bar(ind, [0] * len(bardata), width)
stack.append(bar)
bottom += bardata
bars.append(stack)
if self.xlabel is not None:
outer_axes.set_xlabel(self.xlabel)
if self.ylabel is not None:
inner_axes.set_ylabel(self.ylabel)
if self.yticks is not None:
ymin, ymax = self.axes.get_ylim()
nticks = float(len(self.yticks))
ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
inner_axes.set_yticks(ticks)
inner_axes.set_yticklabels(self.yticks)
elif self.ylim is not None:
inner_axes.set_ylim(self.ylim)
if self.xticks is not None:
outer_axes.set_xticks(arange(cshape[2]) + .5)
outer_axes.set_xticklabels(self.xticks)
if self.xsubticks is not None:
numticks = (cshape[0] + 1) * cshape[2]
inner_axes.set_xticks(arange(numticks) * width + 2 * center)
xsubticks = list(self.xsubticks) + [ '' ]
inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
rotation=30)
if self.legend is not None:
if dim == 1:
lbars = bars[0][0]
if dim == 2:
lbars = [ bars[i][0][0] for i in xrange(len(bars))]
if dim == 3:
number = len(bars[0])
lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]
if self.fig_legend:
self.figure.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
else:
self.axes.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
if self.title is not None:
self.axes.set_title(self.title)
def savefig(self, name):
self.figure.savefig(name)
def savecsv(self, name):
f = file(name, 'w')
data = array(self.inputdata)
dim = len(data.shape)
if dim == 1:
#if self.xlabel:
# f.write(', '.join(list(self.xlabel)) + '\n')
f.write(', '.join([ '%f' % val for val in data]) + '\n')
if dim == 2:
#if self.xlabel:
# f.write(', '.join([''] + list(self.xlabel)) + '\n')
for i,row in enumerate(data):
ylabel = []
#if self.ylabel:
# ylabel = [ self.ylabel[i] ]
f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
if dim == 3:
f.write("don't do 3D csv files\n")
pass
f.close()
if __name__ == '__main__':
from random import randrange
import random, sys
dim = 3
number = 5
args = sys.argv[1:]
if len(args) > 3:
sys.exit("invalid number of arguments")
elif len(args) > 0:
myshape = [ int(x) for x in args ]
else:
myshape = [ 3, 4, 8 ]
# generate a data matrix of the given shape
size = reduce(lambda x,y: x*y, myshape)
#data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
data = [ float(i)/100.0 for i in xrange(size) ]
data = reshape(data, myshape)
# setup some test bar charts
if True:
chart1 = BarChart()
chart1.data = data
chart1.xlabel = 'Benchmark'
chart1.ylabel = 'Bandwidth (GBps)'
chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
chart1.title = 'this is the title'
if len(myshape) > 2:
chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
chart1.graph()
chart1.savefig('/tmp/test1.png')
chart1.savefig('/tmp/test1.ps')
chart1.savefig('/tmp/test1.eps')
chart1.savecsv('/tmp/test1.csv')
if False:
chart2 = BarChart()
chart2.data = data
chart2.colormap = 'gray'
chart2.graph()
chart2.savefig('/tmp/test2.png')
chart2.savefig('/tmp/test2.ps')
# pylab.show()
| bsd-3-clause |
ma-compbio/PEP | genVecs.py | 1 | 7271 | #encoding:utf-8
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import pandas as pd
import numpy as np
import os
import sys
import math
import random
import processSeq
import warnings
import threading
from multiprocessing.dummy import Pool as ThreadPool
from sklearn import preprocessing
import sklearn.preprocessing
from gensim import corpora, models, similarities
class mycorpuse(object):
def __iter__(self):
for line in open("./Data/Learning/unlabeled_train_enhancer_GM12878"):
yield line.split()
class mycorpusp(object):
def __iter__(self):
for line in open("./Data/Learning/unlabeled_train_promoter_GM12878"):
yield line.split()
# Load training data
def getData(type,cell):
data = pd.read_table('./Data/Learning/supervised_'+str(cell)+"_"+str(type))
return data
# Load trained Word2Vec model or train a new model
def getWord_model(word,num_features,min_count,type,cell):
word_model1 = ""
model_name = str(cell)+"_enhancer"
if not os.path.isfile("./" + model_name):
sentence = LineSentence("./Data/Learning/unlabeled_train_enhancer_"+str(cell),max_sentence_length=15000)
print "Start Training Word2Vec model..."
# Set values for various parameters
num_features = int(num_features) # Word vector dimensionality
min_word_count = int(min_count) # Minimum word count
num_workers = 20 # Number of threads to run in parallel
context = 20 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model
print "Training Word2Vec model..."
word_model1 = Word2Vec(sentence, workers=num_workers,\
size=num_features, min_count=min_word_count, \
window =context, sample=downsampling, seed=1)
word_model1.init_sims(replace=False)
word_model1.save(model_name)
print word_model1.most_similar("CATAGT")
else:
print "Loading Word2Vec model..."
word_model1 = Word2Vec.load(model_name)
word_model2 = ""
model_name = str(cell)+"_promoter"
if not os.path.isfile("./" + model_name):
sentence = LineSentence("./Data/Learning/unlabeled_train_promoter_"+str(cell),max_sentence_length=15000)
print "Start Training Word2Vec model..."
# Set values for various parameters
num_features = int(num_features) # Word vector dimensionality
min_word_count = int(min_count) # Minimum word count
num_workers = 20 # Number of threads to run in parallel
context = 20 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model
print "Training Word2Vec model..."
word_model2 = Word2Vec(sentence, workers=num_workers,\
size=num_features, min_count=min_word_count, \
window=context, sample=downsampling, seed=1)
word_model2.init_sims(replace=False)
word_model2.save(model_name)
print word_model2.most_similar("CATAGT")
else:
print "Loading Word2Vec model..."
word_model2 = Word2Vec.load(model_name)
return word_model1,word_model2
# Split sequences into words
def getCleanDNA_split(DNAdata,word):
dnalist = []
counter = 0
for dna in DNAdata:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(DNAdata)),
sys.stdout.flush()
dna = str(dna).upper()
dnalist.append(processSeq.DNA2Sentence(dna,word).split(" "))
counter += 1
print
return dnalist
def makeFeatureVecs(words, model, num_features,word,k,temp):
featureVec = np.zeros((k,num_features), dtype="float32")
nwords = 0
index2word_set = set(model.index2word)
length = len(words)
for word in words:
if word in index2word_set:
# divide the words into k parts, add up in each part
featureVec[math.floor((nwords * k) / length)] += (model[word]) * temp[nwords]
nwords =nwords + 1
featureVec = featureVec.reshape(k * num_features)
#featureVec = featureVec/nwords
return featureVec
def mean2max(vec):
length = len(vec)
mean1 = np.max(vec[0:int(length*0.5)],axis = 0)
mean2 = np.max(vec[int(length*0.5):int(length)],axis = 0)
maxvec = np.mean([mean1,mean2],axis = 0)
return maxvec
def getAvgFeatureVecs(data,model1,model2, num_features, word,k,type,cell):
dnaFeatureVecs = np.zeros((len(data),2*k*num_features), dtype="float32")
if not os.path.isfile("./Data/enhancertfidf"+str(cell)):
print "Getting dictionary"
Corp = mycorpuse()
dictionary = corpora.Dictionary(Corp)
dictionary.save("./Data/enhancerdic"+str(cell))
corpus = [dictionary.doc2bow(text) for text in Corp]
print "Calculating TFIDF"
tfidf = models.TfidfModel(corpus)
tfidf.save("./Data/enhancertfidf"+str(cell))
else:
tfidf = models.TfidfModel.load("./Data/enhancertfidf"+str(cell))
dictionary = corpora.Dictionary.load("./Data/enhancerdic"+str(cell))
dict1 = {k:v for k, v in dictionary.items()}
DNAdata1 = getCleanDNA_split(data["seq1"],word)
counter = 0
for dna in DNAdata1:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(DNAdata1)),
sys.stdout.flush()
vec_bow = dictionary.doc2bow(dna)
vec_tfidf = tfidf[vec_bow]
for i in xrange(len(vec_tfidf)):
dnaFeatureVecs[counter][0:k*num_features] += model1[dict1[vec_tfidf[i][0]]] * vec_tfidf[i][1]
counter += 1
print
del DNAdata1
counter = 0
if not os.path.isfile("./Data/promotertfidf"+str(cell)):
print "Getting dictionary"
Corp = mycorpusp()
dictionary = corpora.Dictionary(Corp)
dictionary.save("./Data/promoterdic"+str(cell))
corpus = [dictionary.doc2bow(text) for text in Corp]
print "Calculating TFIDF"
tfidf = models.TfidfModel(corpus)
tfidf.save("./Data/promotertfidf"+str(cell))
else:
tfidf = models.TfidfModel.load("./Data/promotertfidf"+str(cell))
dictionary = corpora.Dictionary.load("./Data/promoterdic"+str(cell))
dict2 = {k:v for k, v in dictionary.items()}
DNAdata2 = []
counter = 0
for dna in data["seq2"]:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(data)),
sys.stdout.flush()
dna = str(dna).upper()
DNAdata2.append(processSeq.DNA2Sentence(dna,word).split(" "))
counter += 1
counter = 0
print
for dna in DNAdata2:
if counter % 100 == 0:
print "DNA %d of %d\r" % (counter, len(DNAdata2)),
sys.stdout.flush()
vec_bow = dictionary.doc2bow(dna)
vec_tfidf = tfidf[vec_bow]
for i in xrange(len(vec_tfidf)):
dnaFeatureVecs[counter][k*num_features:2*k*num_features] += model2[dict2[vec_tfidf[i][0]]] * vec_tfidf[i][1]
counter += 1
print
np.save("./Datavecs/datavecs_"+str(cell)+"_"+str(type)+".npy",dnaFeatureVecs)
return dnaFeatureVecs
def run(word, num_features,K,type,cell):
warnings.filterwarnings("ignore")
global word_model,data,k
word = int(word)
num_features = int(num_features)
k=int(K)
word_model=""
min_count=10
word_model1,word_model2 = getWord_model(word,num_features,min_count,type,cell)
# Read data
data = getData(type,cell)
length = data.shape[0]
print length
print "Generating Training and Testing Vector"
dataDataVecs = getAvgFeatureVecs(data,word_model1,word_model2,num_features,word,k,type,cell)
if __name__ == "__main__":
run(6,300,1,'new','GM12878')
| mit |
cucs-numpde/class | fdtools.py | 1 | 3922 | import numpy
def cosspace(a, b, n=50):
return (a + b)/2 + (b - a)/2 * (numpy.cos(numpy.linspace(-numpy.pi, 0, n)))
def vander_chebyshev(x, n=None):
if n is None:
n = len(x)
T = numpy.ones((len(x), n))
if n > 1:
T[:,1] = x
for k in range(2,n):
T[:,k] = 2 * x * T[:,k-1] - T[:,k-2]
return T
def chebeval(z, n=None):
"""Build matrices to evaluate the n-term Chebyshev expansion and its derivatives at point(s) z"""
z = numpy.array(z, ndmin=1)
if n is None:
n = len(z)
Tz = vander_chebyshev(z, n)
dTz = numpy.zeros_like(Tz)
dTz[:,1] = 1
dTz[:,2] = 4*z
ddTz = numpy.zeros_like(Tz)
ddTz[:,2] = 4
for n in range(3,n):
dTz[:,n] = n * (2*Tz[:,n-1] + dTz[:,n-2]/(n-2))
ddTz[:,n] = n * (2*dTz[:,n-1] + ddTz[:,n-2]/(n-2))
return [Tz, dTz, ddTz]
def fdstencilV(z, x):
"""Compute finite difference weights using a Vandermonde matrix"""
x = numpy.array(x)
V = numpy.vander(x - z, increasing=True)
scaling = numpy.array([numpy.math.factorial(i) for i in range(len(x))])
return (numpy.linalg.inv(V).T * scaling).T
def fdstencil(z, x, nderiv=None):
"""Compute finite difference weights using recurrences for Lagrange polynomials (see Fornberg 1998)"""
if nderiv is None:
nderiv = len(x)
x = numpy.array(x) - z
k = numpy.arange(nderiv+1)
c = numpy.outer(0.*k, x)
c[0,0] = 1
prod = 1
for j in range(1,len(x)):
dx = x[j] - x[:j]
c[1:,j] = x[j-1]*c[1:,j-1] - k[1:]*c[:-1,j-1]
c[0,j] = x[j-1]*c[0,j-1]
c[:,j] *= -prod
prod = numpy.prod(dx)
c[:,j] /= prod
c[1:,:j] = (x[j]*c[1:,:j] - k[1:,None]*c[:-1,:j]) / dx
c[0,:j] = x[j]*c[0,:j] / dx
return c
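# --- usage sketch: an added illustration, not part of the original module.
# For the equispaced points [-1, 0, 1], the rows of fdstencil's output at z=0
# should reproduce the classical centered-difference weights.
def _fdstencil_demo():
    c = fdstencil(0., [-1., 0., 1.])
    # c[1] is approximately [-0.5, 0., 0.5]  (first-derivative weights)
    # c[2] is approximately [ 1., -2., 1. ]  (second-derivative weights)
    return c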
def fdcompact(z, x, k):
"""Compute a compact (implicit) differencing scheme
b @ u^(k)(z) = c @ u(x)
that maximizes the accuracy of u^(k)(z[0])."""
z = numpy.array(z)
x = numpy.array(x)
n = len(x)
x = x - z[0]
z = z - z[0]
xmin, xmax = min(x), max(x)
dx = (xmax - xmin) / (n - 1)
y = numpy.zeros(n + len(z) - 1)
y[:n] = x
for i in range(1, len(z)):
if (z[i] < 0):
xmin -= dx
y[n + i - 1] = xmin
else:
xmax += dx
y[n + i - 1] = xmax
S = numpy.array([fdstencil(t, y, k)[k] for t in z])
b = numpy.ones(len(z))
T = S[1:,n:].T
b[1:] = numpy.linalg.lstsq(T, -S[0,n:])[0]
c = b.dot(S[:,:n])
return b, c
def dispersion(z, x, b, c):
from matplotlib import pyplot
theta = numpy.linspace(0, numpy.pi, 100)[1:]
phiz = numpy.exp(1j*numpy.outer(z, theta))
phix = numpy.exp(1j*numpy.outer(x, theta))
pyplot.plot(theta, (c.dot(phix) / b.dot(phiz)).imag, '.')
pyplot.plot(theta, theta)
pyplot.plot(theta, numpy.sin(theta))
pyplot.show()
def rk_butcher_4():
A = numpy.array([[0,0,0,0],[.5,0,0,0],[0,.5,0,0],[0,0,1,0]])
b = numpy.array([1/6, 1/3, 1/3, 1/6])
return A, b
def rk_butcher_ssp32():
A = numpy.array([[0, 0, 0],
[1/2, 0, 0],
[1/2, 1/2, 0]])
b = numpy.array([1/3, 1/3, 1/3])
return A, b
def ode_rkexplicit(f, u0, butcher=None, tfinal=1, h=.1):
if butcher is None:
A, b = rk_butcher_4()
else:
A, b = butcher
c = numpy.sum(A, axis=1)
s = len(c)
u = u0.copy()
t = 0
hist = [(t,u0)]
while t < tfinal:
if tfinal - t < 1.01*h:
h = tfinal - t
tnext = tfinal
else:
tnext = t + h
h = min(h, tfinal - t)
fY = numpy.zeros((len(u0), s))
for i in range(s):
Yi = u.copy()
for j in range(i):
Yi += h * A[i,j] * fY[:,j]
fY[:,i] = f(t + h*c[i], Yi)
u += h * fY.dot(b)
t = tnext
hist.append((t, u.copy()))
return hist
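# --- usage sketch: an added illustration, not part of the original module.
# Integrate the scalar test problem u' = -u, u(0) = 1 with the default
# classical RK4 tableau; the final value should be close to exp(-1) ~ 0.3679.
def _rk_demo():
    hist = ode_rkexplicit(lambda t, u: -u, numpy.array([1.]), tfinal=1, h=.1)
    t_end, u_end = hist[-1]
    return t_end, u_end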
| bsd-2-clause |
arkatebi/DynamicalSystems | toggleSwitch/tSwitch-det-pSet-3.py | 1 | 9567 | #/usr/bin/env python
import auxiliary_functions as aux
import PyDSTool as dst
from PyDSTool import common as cmn
import numpy as np
from matplotlib import pyplot as plt
import sys
#------------------------------------------------------------------------------#
def defineSystem():
'''
Create an object that defines the desired ODE system.
'''
# Create an object of args class from common module
DSargs = cmn.args(name='Toggle switch of two genes X and Y')
# Set the parameters:
DSargs.pars = aux.parameter_set_3()
# Set the variables:
DSargs.varspecs = aux.equations()
# Set the auxiliary functions:
DSargs.fnspecs = aux.functions()
# Set initial conditions:
DSargs.ics = {'X': 10, 'Y': 10}
DSargs.xdomain = {'X': [0, 1.0e+4], 'Y':[0, 1.0e+4]}
# Set the range of integration:
DSargs.tdomain = [0,100]
return DSargs
#------------------------------------------------------------------------------#
def t_dynamics_X(pts):
# PyPlot commands
plt.plot(pts['t'], pts['X'])
plt.xlabel('t') # Axes labels
plt.ylabel('X') # ...
#plt.xlim([0,7000])
plt.ylim([0,200]) # Range of the y axis
plt.title(ode.name) # Figure title from model name
plt.show()
plt.figure()
#------------------------------------------------------------------------------#
def t_dynamics_Y(pts):
# PyPlot commands
plt.plot(pts['t'], pts['Y'])
plt.xlabel('t') # Axes labels
plt.ylabel('Y') # ...
#plt.xlim([0,7000])
plt.ylim([0,200]) # Range of the y axis
plt.title(ode.name) # Figure title from model name
plt.show()
plt.figure()
#------------------------------------------------------------------------------#
def t_dynamics_XY(pts):
# PyPlot commands
plt.plot(pts['X'], pts['Y'])
plt.xlabel('X') # Axes labels
plt.ylabel('Y') # ...
#plt.xlim([0,7000])
plt.ylim([0,800]) # Range of the y axis
plt.title(ode.name) # Figure title from model name
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_X(ode):
plt.ylim([0,200])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, x0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'X': x0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['X'])
plt.xlabel('time')
plt.ylabel('X')
plt.title(ode.name + ' multi ICs')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_Y(ode):
plt.ylim([0,200])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, y0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'Y': y0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['Y'])
plt.xlabel('time')
plt.ylabel('Y')
plt.title(ode.name + ' multi ICs')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_X(ode):
plt.figure()
plt.ylim([0,900])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, x0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'X': x0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['X'])
plt.xlabel('time')
plt.ylabel('X')
plt.title(ode.name + ' multi ICs X')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_Y(ode):
plt.figure()
plt.ylim([0,900])
plt.hold(True) # Sequences of plot commands will not clear existing figures
for i, y0 in enumerate(np.linspace(-20,10,30)):
ode.set(ics = { 'Y': y0 } ) # Initial condition
# Trajectories are called pol0, pol1, ...
# sample them on the fly to create Pointset tmp
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['t'], tmp['Y'])
plt.xlabel('time')
plt.ylabel('Y')
plt.title(ode.name + ' multi ICs Y')
plt.show()
#------------------------------------------------------------------------------#
def t_dynamics_multi_ICs_XY(ode):
plt.figure()
plt.ylim([0,900])
# Sequences of plot commands will not clear existing figures:
plt.hold(True)
for i, x0 in enumerate(np.linspace(1,1000,4)):
for i, y0 in enumerate(np.linspace(1,1000,4)):
# Reset the initial conditions in the Vode_ODEsystem object ode:
ode.set(ics = { 'X': x0, 'Y': y0 } )
# Trajectories are called pol0, pol1, ...
# Sample them on the fly to create tmp, a Pointset object:
tmp = ode.compute('pol%3i' % i).sample()
plt.plot(tmp['X'], tmp['Y'])
plt.xlabel('X')
plt.ylabel('Y')
#plt.title(ode.name + ': multi ICs for both')
plt.show()
#plt.savefig('./figures/parSet-1_tdynamics.pdf')
#------------------------------------------------------------------------------#
def getBifDiagrams(ode):
freepar='gX'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=50,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,200], ylim=[0,700], fontsize=10)
freepar='gY'
fp=aux.fast_fixedpoint(ode)
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=50,
maxstep=1e+1, minstep=1e-2, step=1e-1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,200], ylim=[0,700], fontsize=10)
sys.exit(0)
freepar='kX'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
freepar='kY'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
sys.exit(0)
freepar='lX'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
freepar='lY'
fp=aux.fast_fixedpoint(ode)
print(fp.values())
aux.plot_continuation(ode, freepar, keys=['X','Y'], ncol=2, nrow=1,
LocBifPoints=['LP','B'], bif_startpoint=0.1,
maxstep=1e+1, minstep=0.01, step=0.1,
silence=True, fs=[4,4], ics=[fp],
xlim=[0,1], ylim=[0,700], fontsize=10)
sys.exit(0)
#------------------------------------------------------------------------------#
def getNullClines(DSargs, ode):
from PyDSTool.Toolbox import phaseplane as pp
vlim = {'X': [1, 700], 'Y': [1, 700]}
fp = aux.eliminate_redundants(pp.find_fixedpoints(ode, n=2, maxsearch=1e+4,
eps=1e-12),
4)
stab = aux.stability(fp, ode)
for i in range(len(fp)):
print(stab[i], fp[i])
nfp=0
aux.nullclines(['X','Y'], DSargs, stab, fp, nfp=nfp, vlim=vlim,
maxpoints=[800,800],
xticks=[0, 100, 200, 300, 400, 500, 600, 700],
yticks=[0, 100, 200, 300, 400, 500, 600, 700],
step=0.01, minstep=0.001, maxstep=10, fs=[3,3],
fontsize=8, silence=False)
#------------------------------------------------------------------------------#
if __name__ == '__main__':
DSargs = defineSystem()
# Obtain a Vode_ODEsystem object:
# (similar to VODE from SciPy)
ode = dst.Generator.Vode_ODEsystem(DSargs)
# Obtain a Trajectory object (integrate ODE):
traj = ode.compute('polarization')
# Collect data points as a Pointset object:
pts = traj.sample(dt=0.01)
#t_dynamics_X(pts)
#t_dynamics_Y(pts)
#t_dynamics_XY(pts)
#t_dynamics_multi_ICs_X(ode)
#t_dynamics_multi_ICs_Y(ode)
#t_dynamics_multi_ICs_XY(ode)
#getBifDiagrams(ode)
getNullClines(DSargs, ode)
| gpl-3.0 |
Vimos/scikit-learn | sklearn/metrics/__init__.py | 28 | 3604 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
    'pairwise_distances_argmin_min',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
jqug/microscopy-object-detection | readdata.py | 1 | 10627 | import skimage
from lxml import etree
import os
import glob
from sklearn.cross_validation import train_test_split
import numpy as np
from progress_bar import ProgressBar
from skimage import io
from scipy import misc
def create_sets(img_dir, train_set_proportion=.6, test_set_proportion=.2, val_set_proportion=.2):
'''Split a list of image files up into training, testing and validation sets.'''
imgfilenames = glob.glob(img_dir + '*.jpg')
baseimgfilenames = [os.path.basename(f) for f in imgfilenames]
if train_set_proportion + test_set_proportion < 1:
train,val = train_test_split(np.arange(len(baseimgfilenames)),
train_size=train_set_proportion+test_set_proportion,
test_size=val_set_proportion,
random_state=1)
else:
train = np.arange(len(baseimgfilenames))
val = []
train_test_prop = train_set_proportion + test_set_proportion
train,test = train_test_split(train,
train_size=train_set_proportion/train_test_prop,
test_size=test_set_proportion/train_test_prop,
random_state=1)
trainfiles = [baseimgfilenames[i] for i in train]
testfiles = [baseimgfilenames[i] for i in test]
valfiles = [baseimgfilenames[i] for i in val]
return trainfiles, valfiles,testfiles
def get_patch_labels_for_single_image(img_filename, image_dir,annotation_dir, size, step,width, height, objectclass=None):
'''
Read the XML annotation files to get the labels of each patch for a
given image. The labels are 0 if there is no object in the corresponding
patch, and 1 if an object is present.
'''
annotation_filename = annotation_dir + img_filename[:-3] + 'xml'
boundingboxes = get_bounding_boxes_for_single_image(annotation_filename, objectclass=objectclass)
# Scan through patch locations in the image
labels = []
y = (height-(height/step)*step)/2
while y+(size) < height:
#rows
x = (width-(width/step)*step)/2
while (x+(size) < width):
objecthere=0
for bb in boundingboxes:
margin = 0
xmin = bb[0] + margin
xmax = bb[1] - margin
ymin = bb[2] + margin
ymax = bb[3] - margin
cx = x + size/2
cy = y + size/2
if (cx>xmin and cx<xmax and cy>ymin and cy<ymax):
objecthere = 1
break
# Output the details for this patch
labels.append(objecthere)
x+=step
y += step
return np.array(labels)
#http://codereview.stackexchange.com/questions/31352/overlapping-rectangles
def range_overlap(a_min, a_max, b_min, b_max):
'''Neither range is completely greater than the other
'''
return (a_min <= b_max) and (b_min <= a_max)
def overlap(r1, r2):
'''Overlapping rectangles overlap both horizontally & vertically
'''
return range_overlap(r1[0], r1[1], r2[0], r2[1]) and range_overlap(r1[2], r1[3], r2[2], r2[3])
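# Quick sanity check (added illustration): rectangles are given as
# [xmin, xmax, ymin, ymax], matching how overlap() is used below.
#   overlap([0, 2, 0, 2], [1, 3, 1, 3])  -> True   (the regions intersect)
#   overlap([0, 1, 0, 1], [2, 3, 2, 3])  -> False  (the regions are disjoint)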
def get_image_negatives(img, boundingboxes, size, step, grayscale=False, downsample=1, discard_rate=0.9):
'''Negative-labelled patches, taken at random from any part of the image
not overlapping an annotated bounding box.
Since there are typically many potential negative patches in each image, only
    the proportion 1-discard_rate of negative patches is stored.'''
c,height, width = img.shape
patches_per_img = 0
#lazy way to count how many patches we can take
max_y=0
while max_y+(size) < height:
max_x = 0
while max_x+(size) < width:
patches_per_img += 1
max_x += step
max_y += step
max_x /= step
max_y /= step
neg = []
y = (height-(max_y * step))/2
while y+(size) < height:
#rows
x = (width-(max_x * step))/2
while (x+(size) < width):
if np.random.rand()>discard_rate:
left = x
right = x+(size)
top = y
bottom = y+(size)
is_pos=False
for bb in boundingboxes:
if overlap([left,right,top,bottom], bb):
is_pos=True
break
if not is_pos:
patch = img[:, top:bottom:downsample, left:right:downsample]
neg.append(patch.copy()) # without copy seems to leak memory
x += step
y += step
return neg
def get_image_positives(img, boundingboxes, size, downsample=1):
'''Positive-labelled patches, centred on annotated bounding boxes.'''
pos = []
for bb in boundingboxes:
cy = (bb[0] + (bb[1]-bb[0])/2)
cx = (bb[2] + (bb[3]-bb[2])/2)
patch = img[..., cx-size/2:cx+size/2,cy-size/2:cy+size/2]
s= patch.shape
if s[1]<size or s[2]<size:
continue
patch = patch[:,::downsample,::downsample]
pos.append(patch.copy())
return pos
def create_patches(img_basenames, annotation_dir, image_dir, size, step, grayscale=True, progressbar=True, downsample=1, objectclass=None, negative_discard_rate=.9):
'''Extract a set of image patches with labels, from the supplied list of
annotated images. Positive-labelled patches are extracted centered on the
annotated bounding box; negative-labelled patches are extracted at random
from any part of the image which does not overlap an annotated bounding box.'''
if progressbar:
pb = ProgressBar(len(img_basenames))
if not annotation_dir[-1] == os.path.sep:
annotation_dir = annotation_dir + os.path.sep
if not image_dir[-1] == os.path.sep:
image_dir = image_dir + os.path.sep
color_type = 0
if grayscale:
channels=1
else:
channels=3
pos = []
neg = []
s = 1
for img_filename in img_basenames:
if progressbar:
pb.step(s)
s +=1
annotation_filename = annotation_dir + img_filename[:-3] + 'xml'
boundingboxes = get_bounding_boxes_for_single_image(annotation_filename, objectclass)
#colortype = cv2.IMREAD_COLOR
#img = cv2.imread(image_dir + img_filename, colortype)
img = misc.imread(image_dir + img_filename)
height,width,channels=img.shape
img = img.reshape((height, width,channels))
img = np.rollaxis(img,2)
image_pos = get_image_positives(img,boundingboxes,size,downsample=downsample)
pos.append(image_pos)
image_neg = get_image_negatives(img,boundingboxes,size,step,downsample=downsample,discard_rate=negative_discard_rate)
neg.append(image_neg)
pos = [item for sublist in pos for item in sublist]
neg = [item for sublist in neg for item in sublist]
patches = pos+neg
index = np.arange(len(patches))
np.random.seed(0)
np.random.shuffle(index)
np_patches = np.empty((len(patches),channels,size/downsample,size/downsample),dtype=np.uint8)
np_labels = np.empty(len(patches),dtype=int)
max_pos=len(pos)
for i,j in zip(index,xrange(len(index))):
if i < max_pos:
np_patches[j,] = pos[i]
np_labels[j] = 1
else:
np_patches[j,] = neg[i-max_pos]
np_labels[j] = 0
np_labels = np_labels.astype(np.uint8)
return np_labels,np_patches
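# --- usage sketch: an added illustration with hypothetical directory names;
# the original script does not define this helper.
def _example_patch_extraction(image_dir='./images/', annotation_dir='./annotations/'):
    train_files, val_files, test_files = create_sets(image_dir)
    labels, patches = create_patches(train_files, annotation_dir, image_dir,
                                     size=50, step=25, grayscale=False)
    return labels, patches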
def balance(X,y,mult_neg=10):
'''Returns an array with all the positive samples and as many negatives as
mult_neg*npos'''
np.random.seed(0)
neg = np.where(y==0)[0]
neg_count = len(neg)
pos = np.where(y==1)[0]
pos_count = len(pos)
np.random.shuffle(neg,)
neg = neg[0:pos_count*mult_neg]
index = np.concatenate((pos, neg))
np.random.shuffle(index)
y = y.take(index)
X = X.take(index,axis=0)
return X,y
def augment(X,y):
'''Create rotated and flipped versions of all patches.'''
shape = X.shape
num_org=shape[0]
shape = (shape[0]*8, shape[1], shape[2], shape[3])
aug_X = np.empty(shape,dtype=np.uint8)
aug_y = np.empty(shape[0],dtype=int)
new_patch_order = np.arange(shape[0])
np.random.shuffle(new_patch_order)
for i,j in zip(new_patch_order,xrange(shape[0])):
orig_patch = i/8
rot_n = i%4
do_flip = i%8>3
x = np.rollaxis(X[orig_patch],0,3 )
if do_flip:
x = np.flipud(x)
x = np.rot90(x,rot_n)
rot_X = np.rollaxis(x,2)
aug_X[j,] = (rot_X)
aug_y[j]=(y[orig_patch])
aug_y = aug_y.astype('uint8')
return aug_X,aug_y
def augment_positives(X,y):
'''Create rotated and flipped versions of only the positive-labelled
patches.'''
pos_indices = np.where(y)[0]
neg_indices = np.where(y==0)[0]
aug_X_pos, aug_y_pos = augment(X[pos_indices,], y[pos_indices])
aug_X = np.vstack((aug_X_pos, X[neg_indices,]))
aug_y = np.hstack((aug_y_pos, y[neg_indices]))
new_order = np.random.permutation(aug_y.shape[0])
aug_X = aug_X[new_order,]
aug_y = aug_y[new_order]
aug_y = aug_y.astype('uint8')
return aug_X, aug_y
def get_bounding_boxes_for_single_image(filename, objectclass=None):
'''
Given an annotation XML filename, get a list of the bounding boxes around
each object (the ground truth object locations).
'''
annofile = filename[:-3] + 'xml'
file_exists = os.path.exists(filename)
boundingboxes = []
if (file_exists):
# Read the bounding boxes from xml annotation
tree = etree.parse(filename)
r = tree.xpath('//bndbox')
if (len(r) != 0):
for i in range(len(r)):
if (objectclass==None) or (objectclass in r[i].getparent().xpath('label')[0].text.lower()):
xmin = round(float(r[i].xpath('xmin')[0].text))
xmin = max(xmin,1)
xmax = round(float(r[i].xpath('xmax')[0].text))
ymin = round(float(r[i].xpath('ymin')[0].text))
ymin = max(ymin,1)
ymax = round(float(r[i].xpath('ymax')[0].text))
xmin, xmax, ymin, ymax = int(xmin),int(xmax),int(ymin),int(ymax)
boundingboxes.append((xmin,xmax,ymin,ymax))
if len(boundingboxes) == 0:
return np.array([])
return np.vstack(boundingboxes)
| mit |
taedla01/MissionPlanner | Lib/site-packages/numpy/core/function_base.py | 82 | 5474 | __all__ = ['logspace', 'linspace']
import numeric as _nx
from numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float (only if `retstep` is True)
Size of spacing between samples.
See Also
--------
    arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num <= 0:
return array([], float)
if endpoint:
if num == 1:
return array([float(start)])
step = (stop-start)/float((num-1))
y = _nx.arange(0, num) * step + start
y[-1] = stop
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num) * step + start
if retstep:
return y, step
else:
return y
def logspace(start,stop,num=50,endpoint=True,base=10.0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
    arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start,stop,num=num,endpoint=endpoint)
return _nx.power(base,y)
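# A minimal standalone sketch (not part of this module) of the equivalence
# stated in the Notes section of `logspace` above: logspace(start, stop) is the
# same as raising `base` to a linspace of the exponents. It assumes only that
# NumPy is installed and is meant to be run as a separate script.
if __name__ == "__main__":
    import numpy as np

    start, stop, num, base = 2.0, 3.0, 4, 10.0
    via_logspace = np.logspace(start, stop, num=num, base=base)
    via_linspace = np.power(base, np.linspace(start, stop, num=num))
    # Both constructions should agree to floating-point precision.
    assert np.allclose(via_logspace, via_linspace)
    print(via_logspace)  # roughly [ 100., 215.443469, 464.15888336, 1000. ]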
| gpl-3.0 |
nhejazi/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 38 | 16445 | import sys
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import StringIO
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_components = 3
block = n_components * np.ones((3, 3))
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_components, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_components, X = _build_sparse_mtx()
prior = 1. / n_components
lda_1 = LatentDirichletAllocation(n_components=n_components,
doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
evaluate_every=1, learning_method='batch',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., evaluate_every=1,
learning_method='online', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., total_samples=100,
random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_components = 3
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1),
np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_components=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_components', LatentDirichletAllocation(n_components=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_components = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_components, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_method=method,
evaluate_every=1, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
assert_raises_regexp(ValueError, r'Number of samples',
lda._perplexity_precomp_distr, X, invalid_n_samples)
# invalid topic number
invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
assert_raises_regexp(ValueError, r'Number of topics',
lda._perplexity_precomp_distr, X,
invalid_n_components)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_components, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_components, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_doc_topic_distr_deprecation():
# Test that the appropriate warning message is displayed when a user
# attempts to pass the doc_topic_distr argument to the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr1 = lda.fit_transform(X)
distr2 = None
assert_warns(DeprecationWarning, lda.perplexity, X, distr1)
assert_warns(DeprecationWarning, lda.perplexity, X, distr2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
def check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3,
learning_method='batch',
verbose=verbose,
evaluate_every=evaluate_every,
random_state=0)
out = StringIO()
old_out, sys.stdout = sys.stdout, out
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert_equal(expected_lines, n_lines)
assert_equal(expected_perplexities, n_perplexity)
def test_verbosity():
for verbose, evaluate_every, expected_lines, expected_perplexities in [
(False, 1, 0, 0),
(False, 0, 0, 0),
(True, 0, 3, 0),
(True, 1, 3, 3),
(True, 2, 3, 1),
]:
yield (check_verbosity, verbose, evaluate_every, expected_lines,
expected_perplexities)
def test_lda_n_topics_deprecation():
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=10, learning_method='batch')
assert_warns(DeprecationWarning, lda.fit, X)
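# A small standalone sketch (not one of the test cases above) of what the
# block-diagonal fixture from `_build_sparse_mtx` encodes: three topics, each
# owning three distinct words, which a batch LDA fit is expected to recover as
# the top words of its three components. Assumes the same scikit-learn version
# that the tests above target.
if __name__ == "__main__":
    n_components, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_components=n_components,
                                    learning_method='batch', random_state=0)
    lda.fit(X)
    for k, component in enumerate(lda.components_):
        # Each topic should concentrate on one word block:
        # (0, 1, 2), (3, 4, 5) or (6, 7, 8).
        top_words = sorted(component.argsort()[-3:])
        print("topic %d -> top words %s" % (k, top_words))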
| bsd-3-clause |
twhyntie/image-heatmap | make_image_heatmap.py | 1 | 3834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...for the plotting.
import matplotlib.pyplot as plt
#...for the image manipulation.
import matplotlib.image as mpimg
#...for the MATH.
import numpy as np
# For scaling images.
import scipy.ndimage.interpolation as inter
#...for the colours.
from matplotlib import colorbar, colors
# For playing with the tick marks on the colour map axis.
from matplotlib import ticker
# Load the LaTeX text plot libraries.
from matplotlib import rc
# Uncomment to use LaTeX for the plot text.
rc('font',**{'family':'serif','serif':['Computer Modern']})
rc('text', usetex=True)
# Load in the image.
## The scan image as a NumPy array.
scan_img = mpimg.imread("scan.png")
print(" *")
print(" * Image dimensions: %s" % (str(scan_img.shape)))
## The figure upon which to display the scan image.
plot = plt.figure(101, figsize=(5.0, 5.0), dpi=150, facecolor='w', edgecolor='w')
# Adjust the position of the axes.
#plot.subplots_adjust(bottom=0.17, left=0.15)
plot.subplots_adjust(bottom=0.05, left=0.15, right=0.99, top=0.95)
## The plot axes.
plotax = plot.add_subplot(111)
# Set the x axis label.
plt.xlabel("$x$")
# Set the y axis label.
plt.ylabel("$y$")
# Add the original scan image to the plot.
plt.imshow(scan_img)
## The blob centre x values [pixels].
blob_xs = []
## The blob centre y values [pixels].
blob_ys = []
## The blob radii [pixels].
blob_rs = []
# Open the blob data file and retrieve the x, y, and r values.
with open("blobs.csv", "r") as f:
for l in f.readlines():
blob_xs.append(float(l.split(",")[0]))
blob_ys.append(float(l.split(",")[1]))
blob_rs.append(float(l.split(",")[2]))
## The image scale factor.
scale = 6.0
## The width of the image scaled up by the scale factor [pixels].
w = scan_img.shape[0]
## The original width of the image [pixels].
w_o = w / scale
## The height of the image scaled up by the scale factor [pixels].
h = scan_img.shape[1]
## The original height of the image [pixels].
h_o = h / scale
print(" * Image dimensions (w,h) = (%d,%d) -> (w_o,h_o) = (%d,%d)" % (w,h,w_o,h_o))
## The number of bins in each dimension of the heatmap.
#
# We are using the original image dimensions so that our heat map
# maps to the pixels in the original image. This is mainly for
# aesthetic reasons - there would be nothing to stop us using more
# (or fewer) bins.
bins = [w_o, h_o]
## The dimensions of the heat map, taken from the scaled-up image.
map_range = [[0, w], [0, h]]
# Create the heat map using NumPy's 2D histogram functionality.
centre_heatmap, x_edges, y_edges = np.histogram2d(blob_ys, blob_xs, bins=bins, range=map_range)
## The scaled heat map image.
#
# We need to scale the heat map array because although the bin widths
# are > 1, the resultant histogram (when made into an image) creates
# an image with one pixel per bin.
zoom_img = inter.zoom(centre_heatmap, (scale, scale), order=0, prefilter=False)
## The colo(u)r map for the heat map.
cmap = plt.cm.gnuplot
## The maximum number of blob centres in the heat map.
bc_max = np.amax(centre_heatmap)
#
print(" * Maximum value in the heat map is %d." % (bc_max))
## The maximum value to use in the colo(u)r map axis.
color_map_max = bc_max
# Add the (scaled) heat map (2D histogram) to the plot.
zoomed_heat_map = plt.imshow(zoom_img, alpha=0.8, cmap=cmap,norm=colors.Normalize(vmin=0,vmax=color_map_max))
## The heat map colo(u)r bar.
cb = plt.colorbar(alpha=1.0, mappable=zoomed_heat_map)
## An object to neaten up the colour map axis tick marks.
tick_locator = ticker.MaxNLocator(nbins=7)
#
cb.locator = tick_locator
#
cb.update_ticks()
# Add a grid.
plt.grid(1)
# Crop the plot limits to the limits of the scan itself.
plotax.set_xlim([0, h])
plotax.set_ylim([w, 0])
# Save the figure.
plot.savefig("heatmap.png")
print(" *")
| mit |
neilhan/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 4 | 6151 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + self._data.keys())
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
Saurabh7/shogun | examples/undocumented/python_modular/graphical/preprocessor_kpca_graphical.py | 26 | 1893 | from numpy import *
import matplotlib.pyplot as p
import os, sys, inspect
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tools'))
if not path in sys.path:
sys.path.insert(1, path)
del path
from generate_circle_data import circle_data
cir=circle_data()
number_of_points_for_circle1=42
number_of_points_for_circle2=122
row_vector=2
data=cir.generate_data(number_of_points_for_circle1,number_of_points_for_circle2,row_vector)
d=zeros((row_vector,number_of_points_for_circle1))
d2=zeros((row_vector,number_of_points_for_circle2))
d=[data[i][0:number_of_points_for_circle1] for i in range(0,row_vector)]
d2=[data[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(0,row_vector)]
p.plot(d[1][:],d[0][:],'x',d2[1][:],d2[0][:],'o')
p.title('input data')
p.show()
parameter_list = [[data,0.01,1.0], [data,0.05,2.0]]
def preprocessor_kernelpca_modular (data, threshold, width):
from modshogun import RealFeatures
from modshogun import KernelPCA
from modshogun import GaussianKernel
features = RealFeatures(data)
kernel=GaussianKernel(features,features,width)
preprocessor=KernelPCA(kernel)
preprocessor.init(features)
preprocessor.set_target_dim(2)
#X=preprocessor.get_transformation_matrix()
X2=preprocessor.apply_to_feature_matrix(features)
lx0=len(X2)
modified_d1=zeros((lx0,number_of_points_for_circle1))
modified_d2=zeros((lx0,number_of_points_for_circle2))
modified_d1=[X2[i][0:number_of_points_for_circle1] for i in range(lx0)]
modified_d2=[X2[i][number_of_points_for_circle1:(number_of_points_for_circle1+number_of_points_for_circle2)] for i in range(lx0)]
p.plot(modified_d1[0][:],modified_d1[1][:],'o',modified_d2[0][:],modified_d2[1][:],'x')
p.title('final data')
p.show()
return features
if __name__=='__main__':
print('KernelPCA')
preprocessor_kernelpca_modular(*parameter_list[0])
| mit |
kazemakase/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
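# A small numeric sketch (not part of the original example) of what soft voting
# with weights [2, 1, 2] means for a single sample: the ensemble probability is
# just a weighted average of the per-classifier class-probability vectors, so
# the tree and the SVC each count twice as much as the kNN model. The
# probability rows below are made up.
p_tree = np.array([0.2, 0.5, 0.3])
p_knn = np.array([0.6, 0.3, 0.1])
p_svc = np.array([0.1, 0.7, 0.2])
p_soft = np.average(np.vstack([p_tree, p_knn, p_svc]), axis=0, weights=[2, 1, 2])
print(p_soft, "-> predicted class", p_soft.argmax())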
| bsd-3-clause |
ml-lab/neon | neon/diagnostics/visualize_rnn.py | 4 | 6174 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Visualization for recurrent neural networks
"""
import numpy as np
from neon.util.compat import range
class VisualizeRNN(object):
"""
    Visualizing weight matrices during training
"""
def __init__(self):
import matplotlib.pyplot
self.plt = matplotlib.pyplot
self.plt.interactive(1)
def plot_weights(self, weights_in, weights_rec, weights_out):
"""
        Visualize the three weight matrices after every epoch. Serves to
        check that the weights are structured, not exploding, and getting updated
"""
self.plt.figure(2)
self.plt.clf()
self.plt.subplot(1, 3, 1)
self.plt.imshow(weights_in.T, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('input.T')
self.plt.subplot(1, 3, 2)
self.plt.imshow(weights_rec, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('recurrent')
self.plt.subplot(1, 3, 3)
self.plt.imshow(weights_out, vmin=-1, vmax=1, interpolation='nearest')
self.plt.title('output')
self.plt.colorbar()
self.plt.draw()
self.plt.show()
def plot_lstm_wts(self, lstm_layer, scale=1, fig=4):
"""
        Visualize the LSTM gate weight matrices after every epoch. Serves to
        check that the weights are structured, not exploding, and getting updated
"""
self.plt.figure(fig)
self.plt.clf()
pltidx = 1
for lbl, wts in zip(lstm_layer.param_names, lstm_layer.params[:4]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(wts.asnumpyarray().T, vmin=-scale, vmax=scale,
interpolation='nearest')
self.plt.title(lbl + ' Wx.T')
pltidx += 1
for lbl, wts, bs in zip(lstm_layer.param_names,
lstm_layer.params[4:8],
lstm_layer.params[8:12]):
self.plt.subplot(2, 4, pltidx)
self.plt.imshow(np.hstack((wts.asnumpyarray(),
bs.asnumpyarray(),
bs.asnumpyarray())).T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + ' Wh.T')
pltidx += 1
self.plt.draw()
self.plt.show()
def plot_lstm_acts(self, lstm_layer, scale=1, fig=4):
acts_lbl = ['i_t', 'f_t', 'o_t', 'g_t', 'net_i', 'c_t', 'c_t', 'c_phi']
acts_stp = [0, 0, 0, 1, 0, 0, 1, 1]
self.plt.figure(fig)
self.plt.clf()
for idx, lbl in enumerate(acts_lbl):
act_tsr = getattr(lstm_layer, lbl)[acts_stp[idx]]
self.plt.subplot(2, 4, idx+1)
self.plt.imshow(act_tsr.asnumpyarray().T,
vmin=-scale, vmax=scale, interpolation='nearest')
self.plt.title(lbl + '[' + str(acts_stp[idx]) + '].T')
self.plt.draw()
self.plt.show()
def plot_error(self, suberror_list, error_list):
self.plt.figure(1)
self.plt.clf()
self.plt.plot(np.arange(len(suberror_list)) /
np.float(len(suberror_list)) *
len(error_list), suberror_list)
self.plt.plot(error_list, linewidth=2)
self.plt.ylim((min(suberror_list), max(error_list)))
self.plt.draw()
self.plt.show()
def plot_activations(self, pre1, out1, pre2, out2, targets):
"""
        Loop over the tau unrolling steps; at each time step, show the
        pre-activations and outputs of the recurrent layer and the output
        layer. Note that the pre-acts are actually g', so if the activation
        is linear they will be one.
"""
self.plt.figure(3)
self.plt.clf()
for i in range(len(pre1)): # loop over unrolling
self.plt.subplot(len(pre1), 5, 5 * i + 1)
self.plt.imshow(pre1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre1 or g\'1')
self.plt.subplot(len(pre1), 5, 5 * i + 2)
self.plt.imshow(out1[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out1')
self.plt.subplot(len(pre1), 5, 5 * i + 3)
self.plt.imshow(pre2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('pre2 or g\'2')
self.plt.subplot(len(pre1), 5, 5 * i + 4)
self.plt.imshow(out2[i].asnumpyarray(), vmin=-1, vmax=1,
interpolation='nearest')
if i == 0:
self.plt.title('out2')
self.plt.subplot(len(pre1), 5, 5 * i + 5)
self.plt.imshow(targets[i].asnumpyarray(),
vmin=-1, vmax=1, interpolation='nearest')
if i == 0:
self.plt.title('target')
self.plt.draw()
self.plt.show()
def print_text(self, inputs, outputs):
"""
Moved this here so it's legal to use numpy.
"""
print("Prediction inputs")
print(np.argmax(inputs, 0).asnumpyarray().astype(np.int8).view('c'))
print("Prediction outputs")
print(np.argmax(outputs, 0).asnumpyarray().astype(np.int8).view('c'))
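# A minimal usage sketch (not part of the neon source) for the error-curve plot
# above. The loss values below are made up; the sketch assumes only that
# matplotlib is importable, as the class itself already does.
if __name__ == "__main__":
    viz = VisualizeRNN()
    suberrors = [1.0, 0.9, 0.8, 0.85, 0.6, 0.55, 0.5, 0.45]  # per-minibatch cost
    errors = [0.9, 0.6, 0.45]                                # per-epoch cost
    viz.plot_error(suberrors, errors)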
| apache-2.0 |
ratschlab/RGAN | eICU_tstr_evaluation.py | 1 | 8268 | import data_utils
import pandas as pd
import numpy as np
import tensorflow as tf
import math, random, itertools
import pickle
import time
import json
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc, precision_recall_curve
import copy
from scipy.stats import sem
print ("Starting TSTR experiment.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
# iterate over all dataset versions generated after running the GAN for 5 times
aurocs_all_runs = []
auprcs_all_runs = []
for oo in range(5):
print (oo)
# find the best "dataset epoch", meaning the GAN epoch that generated the dataset
# validation is only done in some of the tasks, and the others are considered unknown
# (use validation set to pick best GAN epoch, then get result on test set)
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
all_aurocs_exp = []
all_auprcs_exp = []
for nn in np.arange(50,1050,50):
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
all_aurocs = []
all_auprcs = []
# in case we want to train each random forest multiple times with each dataset
for exp_num in range(1):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(vali_seqs_r, vali_targets[:,col_num]))
preds = estimator.predict(vali_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=vali_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=vali_targets[:,col_num]))
preds = estimator.predict_proba(vali_seqs_r)
fpr, tpr, thresholds = roc_curve(vali_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(vali_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
all_aurocs.append(aurocs)
all_auprcs.append(auprcs)
all_aurocs_exp.append(all_aurocs)
all_auprcs_exp.append(all_auprcs)
#with open('all_aurocs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_aurocs_exp)
#with open('all_auprcs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_auprcs_exp)
best_idx = np.argmax(np.array(all_aurocs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1) + np.array(all_auprcs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1))
best = np.arange(50,1050,50)[best_idx]
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
print ("----------------------------")
aurocs_all_runs.append(aurocs)
auprcs_all_runs.append(auprcs)
allr = np.vstack(aurocs_all_runs)
allp = np.vstack(auprcs_all_runs)
tstr_aurocs_mean = allr.mean(axis=0)
tstr_aurocs_sem = sem(allr, axis=0)
tstr_auprcs_mean = allp.mean(axis=0)
tstr_auprcs_sem = sem(allp, axis=0)
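# Side note (illustration only; it does not feed into any of the results
# above): scipy.stats.sem is used here with its default ddof=1, i.e. the
# standard error is the sample standard deviation across the 5 runs divided
# by sqrt(5). The toy numbers below are made up.
_sem_demo = np.array([0.70, 0.72, 0.68, 0.74, 0.71])
assert np.isclose(sem(_sem_demo), _sem_demo.std(ddof=1) / np.sqrt(len(_sem_demo)))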
# get AUROC/AUPRC for real, random data
print ("Experiment with real data.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
aurocs_all = []
auprcs_all = []
for i in range(5):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
aurocs_all.append(aurocs)
auprcs_all.append(auprcs)
real_aurocs_mean = np.array(aurocs_all).mean(axis=0)
real_aurocs_sem = sem(aurocs_all, axis=0)
real_auprcs_mean = np.array(auprcs_all).mean(axis=0)
real_auprcs_sem = sem(auprcs_all, axis=0)
print ("Experiment with random predictions.")
#random score
test_targets_random = copy.deepcopy(test_targets)
random.shuffle(test_targets_random)
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
accuracies.append(accuracy_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
precisions.append(precision_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
preds = np.random.rand(len(test_targets[:,col_num]))
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds)
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds)
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
random_aurocs = aurocs
random_auprcs = auprcs
print("Results")
print("------------")
print("------------")
print("TSTR")
print(tstr_aurocs_mean)
print(tstr_aurocs_sem)
print(tstr_auprcs_mean)
print(tstr_auprcs_sem)
print("------------")
print("Real")
print(real_aurocs_mean)
print(real_aurocs_sem)
print(real_auprcs_mean)
print(real_auprcs_sem)
print("------------")
print("Random")
print(random_aurocs)
print(random_auprcs) | mit |
robin-lai/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
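# A short aside (not part of the original example): scikit-learn's
# semi-supervised estimators treat the label -1 as "unlabeled", which is why
# the code above overwrites a random ~30% / ~50% of `y` with -1 before
# fitting. The toy labels below are made up.
y_toy = np.array([0, 1, 2, 0, 1, 2, 0, 1])
unlabel_mask = rng.rand(len(y_toy)) < 0.5  # reuses the RandomState created above
y_toy[unlabel_mask] = -1
print("unlabeled fraction:", np.mean(y_toy == -1))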
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/core/indexing.py | 9 | 64500 | # pylint: disable=W0223
from pandas.core.index import Index, MultiIndex
from pandas.compat import range, zip
import pandas.compat as compat
import pandas.core.common as com
from pandas.core.common import (is_bool_indexer, is_integer_dtype,
_asarray_tuplesafe, is_list_like, isnull,
is_null_slice, is_full_slice,
ABCSeries, ABCDataFrame, ABCPanel, is_float,
_values_from_object, _infer_fill_value, is_integer)
import numpy as np
# the supported indexers
def get_indexers_list():
return [
('ix', _IXIndexer),
('iloc', _iLocIndexer),
('loc', _LocIndexer),
('at', _AtIndexer),
('iat', _iAtIndexer),
]
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice(object):
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class _NDFrameIndexer(object):
_valid_types = None
_exception = KeyError
def __init__(self, obj, name):
self.obj = obj
self.ndim = obj.ndim
self.name = name
self.axis = None
def __call__(self, *args, **kwargs):
# we need to return a copy of ourselves
self = self.__class__(self.obj, self.name)
# set the passed in values
for k, v in compat.iteritems(kwargs):
setattr(self,k,v)
return self
def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
try:
values = self.obj.get_value(*key)
if np.isscalar(values):
return values
except Exception:
pass
return self._getitem_tuple(key)
else:
return self._getitem_axis(key, axis=0)
def _get_label(self, label, axis=0):
if self.ndim == 1:
# for perf reasons we want to try _xs first
# as its basically direct indexing
# but will fail when the index is not present
# see GH5667
try:
return self.obj._xs(label, axis=axis)
except:
return self.obj[label]
elif (isinstance(label, tuple) and
isinstance(label[axis], slice)):
raise IndexingError('no slices here, handle elsewhere')
return self.obj._xs(label, axis=axis)
def _get_loc(self, key, axis=0):
return self.obj._ixs(key, axis=axis)
def _slice(self, obj, axis=0, kind=None):
return self.obj._slice(obj, axis=axis, kind=kind)
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex):
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple) and not self.ndim < len(key):
return self._convert_tuple(key, is_setter=True)
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError:
raise IndexingError(key)
def __setitem__(self, key, value):
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
def _has_valid_type(self, k, axis):
raise NotImplementedError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if not self._has_valid_type(k, i):
raise ValueError("Location based indexing can only have [%s] "
"types" % self._valid_types)
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed iterable """
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
elif ax.is_floating():
return False
return True
def _is_nested_tuple_indexer(self, tup):
if any([ isinstance(ax, MultiIndex) for ax in self.obj.axes ]):
return any([ is_nested_tuple(tup,ax) for ax in self.obj.axes ])
return False
def _convert_tuple(self, key, is_setter=False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(self._convert_to_indexer(key, axis=axis, is_setter=is_setter))
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _convert_range(self, key, is_setter=False):
""" convert a range argument """
return list(key)
def _convert_scalar_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
def _convert_slice_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
def _has_valid_setitem_indexer(self, indexer):
return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally """
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
elif isinstance(i, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
return True
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
from pandas import Panel, DataFrame, Series
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
blk, = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value,dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
# if we have any multi-indexes that have non-trivial slices (not null slices)
# then we must take the split path, xref GH 10360
if isinstance(ax, MultiIndex) and not (is_integer(i) or is_null_slice(i)):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes)
if _i != i
]
if any([not l for l in len_non_info_axes]):
if not is_list_like_indexer(value):
raise ValueError("cannot set a frame with no "
"defined index and a scalar")
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index),key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj.is_copy=None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index),indexer)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
new_values = np.concatenate([self.obj._values,
new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError(
"cannot set a frame with no defined columns"
)
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns,copy=True)
value.name = indexer
# a list-list
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError(
"cannot set a row with mismatched columns"
)
value = Series(value,index=self.obj.columns,name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if (len(labels) == 1 and
isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and np.iterable(value) and lplane_indexer != len(value):
if len(obj[idx]) != len(value):
raise ValueError(
"cannot set using a multi-index selection indexer "
"with a different length than the value"
)
# make sure we have an ndarray
value = getattr(value,'values',value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]), value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0],
plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if isinstance(pi, tuple) and all(is_null_slice(idx) or is_full_slice(idx, len(self.obj)) for idx in pi):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
""" return True if we have an equal len settable """
if not len(labels) == 1 or not np.iterable(value):
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value,'ndim',1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, MultiIndex)
for item in labels:
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item], multiindex_indexer
)
else:
v = np.nan
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value,dtype=object)
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value '
'when setting with an ndarray')
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError('Must have equal len keys and value '
'when setting with an iterable')
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if len(indexer) > info_axis and is_integer(indexer[info_axis]) and all(
is_null_slice(idx) for i, idx in enumerate(indexer) if i != info_axis):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
----------
indexer : tuple, slice, scalar
The indexer used to get the locations that will be set to
`ser`
ser : pd.Series
The values to assign to the locations specified by `indexer`
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
        Returns
        -------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
            # rather than find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# panel
elif is_panel:
single_aligner = (single_aligner and
(aligners[1] or aligners[2]))
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
all([com.is_sequence(_) for _ in indexer])):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
l = len(indexer[1])
ser = np.tile(ser, l).reshape(l, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if com.is_sequence(idx) or isinstance(idx, slice):
if single_aligner and is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner and is_frame:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
# >2 dims
elif single_aligner:
broadcast = []
for n, labels in enumerate(self.obj._get_plane_axes(i)):
# reindex along the matching dimensions
if len(labels & ser.index):
ser = ser.reindex(labels)
else:
broadcast.append((n, len(labels)))
# broadcast along other dims
ser = ser._values.copy()
for (axis, l) in broadcast:
shape = [-1] * (len(broadcast) + 1)
shape[axis] = l
ser = np.tile(ser, l).reshape(shape)
if self.obj.ndim == 3:
ser = ser.T
return ser
elif np.isscalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if com.is_sequence(ix) or isinstance(ix, slice):
if idx is None:
idx = ax[ix].ravel()
elif cols is None:
cols = ax[ix].ravel()
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
# need to conform to the convention
# as we are not selecting on the items axis
# and we have a single indexer
# GH 7763
if len(sindexers) == 1 and sindexers[0] != 0:
df = df.T
if idx is None:
idx = df.index
if cols is None:
cols = df.columns
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer))
and is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if isinstance(ax, MultiIndex) and isinstance(
df.index, MultiIndex) and ax.nlevels != df.index.nlevels:
raise TypeError("cannot align on a multi-index with out specifying the join levels")
val = df.reindex(index=ax)._values
return val
elif np.isscalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy()._values
return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
raise NotImplementedError("cannot set using an indexer with a Panel "
"yet!")
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
# no shortcut needed
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
return retval
def _multi_take_opportunity(self, tup):
from pandas.core.generic import NDFrame
# ugly hack for GH #836
if not isinstance(self.obj, NDFrame):
return False
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
for indexer, ax in zip(tup, self.obj._data.axes):
if isinstance(ax, MultiIndex):
return False
elif is_bool_indexer(indexer):
return False
elif not ax.is_unique:
return False
return True
def _multi_take(self, tup):
""" create the reindex map for our objects, raise the _exception if we
can't create the indexer
"""
try:
o = self.obj
d = dict([
(a, self._convert_for_reindex(t, axis=o._get_axis_number(a)))
for t, a in zip(tup, o._AXIS_ORDERS)
])
return o.reindex(**d)
except:
raise self._exception
def _convert_for_reindex(self, key, axis=0):
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
else:
if isinstance(key, Index):
# want Index objects to pass through untouched
keyarr = key
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
if is_integer_dtype(keyarr) and not labels.is_integer():
keyarr = com._ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
def _handle_lowerdim_multi_index_axis0(self, tup):
# we have an axis0 multi-index, handle or raise
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError("Handle elsewhere")
# raise the error if we are not sorted
ax0 = self.obj._get_axis(0)
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
return None
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
        # we may be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
if isinstance(ax0, MultiIndex):
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
        # df.ix[0, ['C', 'B', 'A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1:]
else:
new_key = tup[:i] + tup[i + 1:]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if (isinstance(section, ABCDataFrame) and i > 0
and len(new_key) == 2):
a, b = new_key
new_key = b, a
if len(new_key) == 1:
new_key, = new_key
                # This is an elided recursive call to iloc/loc/etc.
return getattr(section, self.name)[new_key]
raise IndexingError('not applicable')
def _getitem_nested_tuple(self, tup):
# we have a nested tuple so have at least 1 multi-index level
        # we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
# this is a series with a multi-index specified a tuple of selectors
return self._getitem_axis(tup, axis=0)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for i, key in enumerate(tup):
if is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if np.isscalar(obj) or not hasattr(obj,'ndim'):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
# GH 7516
# if had a 3 dim and are going to a 2d
# axes are reversed on a DataFrame
if i >= 1 and current_ndim == 3 and obj.ndim == 2:
obj = obj.T
axis -= 1
return obj
def _getitem_axis(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif is_list_like_indexer(key) and not (isinstance(key, tuple) and
isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj.take(inds, axis=axis, convert=False)
else:
if isinstance(key, Index):
# want Index objects to pass through untouched
keyarr = key
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
# have the index handle the indexer and possibly return
# an indexer or raising
indexer = labels._convert_list_indexer(keyarr, kind=self.name)
if indexer is not None:
return self.obj.take(indexer, axis=axis)
# this is not the most robust, but...
if (isinstance(labels, MultiIndex) and len(keyarr) and
not isinstance(keyarr[0], tuple)):
level = 0
else:
level = None
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
try:
result = self.obj.reindex_axis(keyarr, axis=axis, level=level)
# this is an error as we are trying to find
# keys in a multi-index that don't exist
if isinstance(labels, MultiIndex) and level is not None:
if hasattr(result,'ndim') and not np.prod(result.shape) and len(keyarr):
raise KeyError("cannot index a multi-index axis with these keys")
return result
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
return self.obj.reindex(keyarr, level=level)
# existing labels are non-unique
else:
# reindex with the specified axis
if axis + 1 > self.obj.ndim:
raise AssertionError("invalid indexing error with "
"non-unique index")
new_target, indexer, new_indexer = labels._reindex_non_unique(keyarr)
if new_indexer is not None:
result = self.obj.take(indexer[indexer!=-1], axis=axis,
convert=False)
result = result._reindex_with_indexers({
axis: [new_target, new_indexer]
}, copy=True, allow_dups=True)
else:
result = self.obj.take(indexer, axis=axis,
convert=False)
return result
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
"In the face of ambiguity, refuse the temptation to guess."
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
# if we are a scalar indexer and not type correct raise
obj = self._convert_scalar_indexer(obj, axis)
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
if isinstance(obj, Index):
objarr = obj.values
else:
objarr = _asarray_tuplesafe(obj)
# The index may want to handle a list indexer differently
# by returning an indexer or raising
indexer = labels._convert_list_indexer(objarr, kind=self.name)
if indexer is not None:
return indexer
# this is not the most robust, but...
if (isinstance(labels, MultiIndex) and
not isinstance(objarr[0], tuple)):
level = 0
_, indexer = labels.reindex(objarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(labels))
check = labels.levels[0].get_indexer(objarr)
else:
level = None
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
(indexer,
missing) = labels.get_indexer_non_unique(objarr)
check = indexer
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % objarr[mask])
return _values_from_object(indexer)
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
def _tuplify(self, loc):
tup = [slice(None, None) for _ in range(self.ndim)]
tup[0] = loc
return tuple(tup)
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
indexer = self._convert_slice_indexer(slice_obj, axis)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _IXIndexer(_NDFrameIndexer):
"""A primarily label-location based indexer, with integer position
fallback.
``.ix[]`` supports mixed integer and label based access. It is
primarily label based, but will fall back to integer positional
access unless the corresponding axis is of integer type.
``.ix`` is the most general indexer and will support any of the
inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating
point label schemes. ``.ix`` is exceptionally useful when dealing
    with mixed positional and label based hierarchical indexes.
However, when an axis is integer based, ONLY label based access
and not positional access is supported. Thus, in such cases, it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
See more at :ref:`Advanced Indexing <advanced>`.
"""
def _has_valid_type(self, key, axis):
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
return True
else:
self._convert_scalar_indexer(key, axis)
return True
class _LocationIndexer(_NDFrameIndexer):
_exception = Exception
def __getitem__(self, key):
if type(key) is tuple:
return self._getitem_tuple(key)
else:
return self._getitem_axis(key, axis=0)
def _getitem_axis(self, key, axis=0):
raise NotImplementedError()
def _getbool_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
return self.obj.take(inds, axis=axis, convert=False)
except Exception as detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=0):
""" this is pretty simple as we just have to deal with labels """
obj = self.obj
if not need_slice(slice_obj):
return obj
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _LocIndexer(_LocationIndexer):
"""Purely label-location based indexer for selection by label.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'`` (note that contrary
to usual python slices, **both** the start and the stop are included!).
- A boolean array.
``.loc`` will raise a ``KeyError`` when the items are not found.
See more at :ref:`Selection by Label <indexing.label>`
"""
_valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean")
_exception = KeyError
def _has_valid_type(self, key, axis):
ax = self.obj._get_axis(axis)
# valid for a label where all labels are in the index
        # slice of labels (where start-end in labels)
        # slice of integers (only if in the labels)
# boolean
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
# mi is just a passthru
if isinstance(key, tuple) and isinstance(ax, MultiIndex):
return True
# TODO: don't check the entire key unless necessary
if len(key) and np.all(ax.get_indexer_for(key) < 0):
raise KeyError("None of [%s] are in the [%s]" %
(key, self.obj._get_axis_name(axis)))
return True
else:
def error():
if isnull(key):
raise TypeError(
"cannot use label indexing with a null key")
raise KeyError("the label [%s] is not in the [%s]" %
(key, self.obj._get_axis_name(axis)))
try:
key = self._convert_scalar_indexer(key, axis)
if not key in ax:
error()
except (TypeError) as e:
# python 3 type errors should be raised
if 'unorderable' in str(e): # pragma: no cover
error()
raise
except:
error()
return True
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# GH 7349
# possibly convert a list-like into a nested tuple
# but don't convert a list-like of tuples
if isinstance(labels, MultiIndex):
if not isinstance(key, tuple) and len(key) > 1 and not isinstance(key[0], tuple):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and
isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [ slice(None) ] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
class _iLocIndexer(_LocationIndexer):
"""Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See more at :ref:`Selection by Position <indexing.integer>`
"""
_valid_types = ("integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array")
_exception = IndexError
def _has_valid_type(self, key, axis):
if is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError(
"iLocation based boolean indexing on an integer type "
"is not available"
)
raise ValueError("iLocation based boolean indexing cannot use "
"an indexable as a mask")
return True
if isinstance(key, slice):
return True
elif is_integer(key):
return self._is_valid_integer(key, axis)
elif is_list_like_indexer(key):
return self._is_valid_list_like(key, axis)
return False
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _is_valid_integer(self, key, axis):
# return a boolean if we have a valid integer indexer
ax = self.obj._get_axis(axis)
l = len(ax)
if key >= l or key < -l:
raise IndexError("single positional indexer is out-of-bounds")
return True
def _is_valid_list_like(self, key, axis):
        # return a boolean if we are a valid list-like (e.g. that we don't have out-of-bounds values)
# coerce the key to not exceed the maximum size of the index
arr = np.array(key)
ax = self.obj._get_axis(axis)
l = len(ax)
if len(arr) and (arr.max() >= l or arr.min() < -l):
raise IndexError("positional indexers are out-of-bounds")
return True
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except:
pass
retval = self.obj
axis=0
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
axis += 1
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
# if the dim was reduced, then pass a lower-dim the next time
if retval.ndim<self.ndim:
axis -= 1
# try to get for the next axis
axis += 1
return retval
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
slice_obj = self._convert_slice_indexer(slice_obj, axis)
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, kind='iloc')
else:
return self.obj.take(slice_obj, axis=axis, convert=False)
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
self._has_valid_type(key, axis)
return self._getbool_axis(key, axis=axis)
# a single integer or a list of integers
else:
if is_list_like_indexer(key):
# validate list bounds
self._is_valid_list_like(key, axis)
# force an actual list
key = list(key)
else:
key = self._convert_scalar_indexer(key, axis)
if not is_integer(key):
raise TypeError("Cannot index by location index with a "
"non-integer key")
# validate the location
self._is_valid_integer(key, axis)
return self._get_loc(key, axis=axis)
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
""" much simpler as we only have to deal with our valid types """
        # may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
elif self._has_valid_type(obj, axis):
return obj
raise ValueError("Can only index by location with a [%s]" %
self._valid_types)
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
def _convert_key(self, key, is_setter=False):
return list(key)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError('Invalid call for scalar access (getting)!')
key = self._convert_key(key)
return self.obj.get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if not isinstance(key, tuple):
key = self._tuplify(key)
if len(key) != self.obj.ndim:
raise ValueError('Not enough indexers for scalar access '
'(setting)!')
key = list(self._convert_key(key, is_setter=True))
key.append(value)
self.obj.set_value(*key, takeable=self._takeable)
class _AtIndexer(_ScalarAccessIndexer):
"""Fast label-based scalar accessor
Similarly to ``loc``, ``at`` provides **label** based scalar lookups.
You can also set using these indexers.
"""
_takeable = False
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't fallback) """
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index can only have integer "
"indexers")
else:
if is_integer(i):
raise ValueError("At based indexing on an non-integer index can only have non-integer "
"indexers")
return key
class _iAtIndexer(_ScalarAccessIndexer):
"""Fast integer location scalar accessor.
Similarly to ``iloc``, ``iat`` provides **integer** based lookups.
You can also set using these indexers.
"""
_takeable = True
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _convert_key(self, key, is_setter=False):
""" require integer args (and convert to label arguments) """
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer "
"indexers")
return key
# 32-bit floating point machine epsilon
_eps = np.finfo('f4').eps
def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
l = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += l
if stop is None or stop > l:
stop = l
elif stop < 0:
stop += l
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step-1) // step
elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
def convert_to_index_sliceable(obj, key):
"""if we are index sliceable, then return my slicer, otherwise return None
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind='getitem')
elif isinstance(key, compat.string_types):
# we are an actual column
if key in obj._data.items:
return None
# we need a timelike key here
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except:
return None
return None
def is_index_slice(obj):
def _is_valid_index(x):
return (is_integer(x) or is_float(x)
and np.allclose(x, int(x), rtol=_eps, atol=0))
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
# this function assumes that is_bool_indexer(key) == True
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
mask = com.isnull(result._values)
if mask.any():
raise IndexingError('Unalignable boolean Series key provided')
result = result.astype(bool)._values
else:
# is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
result = np.asarray(result, dtype=bool)
return result
def convert_missing_indexer(indexer):
""" reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted """
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def convert_from_missing_indexer_tuple(indexer, axes):
""" create a filtered indexer that doesn't have any missing indexers """
def get_indexer(_i, _idx):
return (axes[_i].get_loc(_idx['key'])
if isinstance(_idx, dict) else _idx)
return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)])
def maybe_convert_indices(indices, n):
""" if we have negative indicies, translate to postive here
if have indicies that are out-of-bounds, raise an IndexError
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.int_)
mask = indices < 0
if mask.any():
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
def is_nested_tuple(tup, labels):
    # check for a compatible nested tuple and multiindexes among the axes
if not isinstance(tup, tuple):
return False
# are we nested tuple of: tuple,list,slice
for i, k in enumerate(tup):
if isinstance(k, (tuple, list, slice)):
return isinstance(labels, MultiIndex)
return False
def is_list_like_indexer(key):
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_label_like(key):
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj):
return (obj.start is not None or
obj.stop is not None or
(obj.step is not None and obj.step != 1))
def maybe_droplevels(index, key):
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except:
pass
return index
def _non_reducing_slice(slice_):
"""
    Ensure that a slice doesn't reduce to a Series or Scalar.
    Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) +
[ABCSeries, np.ndarray, Index, list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or com.is_list_like(part)
if not com.is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
    Want nice defaults for background_gradient that don't break
    with non-numeric data. But if slice_ is passed, go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
| artistic-2.0 |
jnmclarty/trump | trump/extensions/source/tx-dbapi/dbapiext.py | 2 | 2524 | """
The DBAPI driver will, by default, use the same driver SQLAlchemy is using for trump.
There is currently no way to change this default. It's assumed that the driver
is DBAPI 2.0 compliant.
Required kwargs include:
 - 'dbinstype' which must be one of 'COMMAND', 'KEYCOL', 'TWOKEYCOL'
- 'dsn', 'user', 'password', 'host', 'database', 'port'
Optional kwargs include:
- duphandler ['sum'] which just groups duplicate index values together via the sum.
Additional kwargs:
 Required based on 'dbinstype' chosen:
'COMMAND' :
- 'command' which is just a SQL string, where the first column becomes the index, and the second
column becomes the data.
'KEYCOL' :
- ['indexcol', 'datacol', 'table', 'keycol', 'key']
'TWOKEYCOL' :
- ['indexcol', 'datacol', 'table', 'keyacol', 'keya', 'keybcol', 'keyb']
"""
stype = 'DBAPI'
renew = True
class Source(object):
def __init__(self, ses, **kwargs):
db = __import__(ses.bind.driver)
dbargs = ['dsn', 'user', 'password', 'host', 'database', 'port']
con_kwargs = {k: v for k, v in kwargs.items() if k in dbargs}
self.con = db.connect(**con_kwargs)
import pandas as pd
self.pd = pd
def getseries(self, ses, **kwargs):
cur = self.con.cursor()
if kwargs['dbinstype'] == 'COMMAND':
qry = kwargs['command']
elif kwargs['dbinstype'] == 'KEYCOL':
reqd = ['indexcol', 'datacol', 'table', 'keycol', 'key']
rel = (kwargs[c] for c in reqd)
qry = "SELECT {0},{1} FROM {2} WHERE {3} = '{4}' ORDER BY {0};"
qry = qry.format(*rel)
elif kwargs['dbinstype'] == 'TWOKEYCOL':
reqd = ['indexcol', 'datacol', 'table', 'keyacol', 'keya', 'keybcol', 'keyb']
rel = (kwargs[c] for c in reqd)
qry = "SELECT {0},{1} FROM {2} WHERE {3} = '{4}' AND {5} = '{6}' ORDER BY {0};"
qry = qry.format(*rel)
else:
raise NotImplementedError("The database type {} has not been created.".format(kwargs['dbinstype']))
cur.execute(qry)
results = [(row[0], row[1]) for row in cur.fetchall()]
if len(results):
ind, dat = zip(*results)
else:
ind, dat = [], []
data = self.pd.Series(dat, ind)
try:
dosum = kwargs['duphandler'] == 'sum'
except:
dosum = False
if dosum:
data = data.groupby(data.index).sum()
return data
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 73 | 1854 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/naive_bayes.py | 3 | 20231 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
import warnings
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import array2d, atleast2d_or_csr, column_or_1d, check_arrays
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class
in the model, where classes are ordered arithmetically.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Parameters
----------
X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
Attributes
----------
`class_prior_` : array, shape = [n_classes]
probability of each class.
`theta_` : array, shape = [n_classes, n_features]
mean of each feature per class
`sigma_` : array, shape = [n_classes, n_features]
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns self.
"""
X, y = check_arrays(X, y, sparse_format='dense')
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
self.classes_ = unique_y = np.unique(y)
n_classes = unique_y.shape[0]
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
epsilon = 1e-9
for i, y_i in enumerate(unique_y):
Xi = X[y == y_i, :]
self.theta_[i, :] = np.mean(Xi, axis=0)
self.sigma_[i, :] = np.var(Xi, axis=0) + epsilon
self.class_prior_[i] = np.float(Xi.shape[0]) / n_samples
return self
def _joint_log_likelihood(self, X):
X = array2d(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = atleast2d_or_csr(X, dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# convert to float to support sample weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= array2d(sample_weight).T
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior()
return self
def fit(self, X, y, sample_weight=None, class_prior=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_arrays(X, y, sparse_format='csr')
X = X.astype(np.float)
y = column_or_1d(y, warn=True)
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# convert to float to support sample weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= array2d(sample_weight).T
if class_prior is not None:
warnings.warn('class_prior has been made an ``__init__`` parameter'
' and will be removed from fit in version 0.15.',
DeprecationWarning)
else:
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`class_log_prior_` : array, shape (n_classes, )
Smoothed empirical log probability for each class.
`intercept_` : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
`feature_log_prob_`: array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
`coef_` : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
`class_count_` : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
`feature_count_` : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/
naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
`class_log_prior_` : array, shape = [n_classes]
Log probability of each class (smoothed).
`feature_log_prob_` : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
`class_count_` : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
`feature_count_` : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
n_classes = len(self.classes_)
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * n_classes
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
X = atleast2d_or_csr(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
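        # i.e. for each class c:
        #   log P(x | c) = sum_i [x_i * (log p_ci - log(1 - p_ci)) + log(1 - p_ci)]
        # the two lines below compute this plus log P(c) with sparse-friendly
        # dot products.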
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause |
fspaolo/scikit-learn | examples/cluster/plot_kmeans_digits.py | 8 | 4495 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import pylab as pl
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=pl.cm.Paired,
aspect='auto', origin='lower')
pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
pl.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
pl.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
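# A possible follow-up (not part of the original example, sketch only): the
# last fitted ``clf`` from the loop above could be exported for inspection,
# assuming Graphviz is available to render the .dot file (the file name is
# illustrative):
#
#     from sklearn.tree import export_graphviz
#     export_graphviz(clf, out_file="iris_pair_tree.dot",
#                     feature_names=[iris.feature_names[i] for i in pair])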
| bsd-3-clause |
xuanyuanking/spark | python/pyspark/pandas/data_type_ops/base.py | 5 | 13688 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
UserDefinedType,
)
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
"""Check whether the `operand` is valid for arithmetic operations against numerics."""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return not isinstance(operand, bool) or allow_bool
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType)
)
else:
return False
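# Illustrative sketch (not part of the original module): expected behaviour of
# is_valid_operand_for_numeric_arithmetic for plain Python operands, as
# implied by the definition above:
#
#     is_valid_operand_for_numeric_arithmetic(3)                       # True
#     is_valid_operand_for_numeric_arithmetic(True)                    # True
#     is_valid_operand_for_numeric_arithmetic(True, allow_bool=False)  # False
#     is_valid_operand_for_numeric_arithmetic("a")                     # False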
def transform_boolean_operand_to_numeric(
operand: Any, spark_type: Optional[DataType] = None
) -> Any:
"""Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is.
"""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
return operand.spark.transform(lambda scol: scol.cast(spark_type))
elif isinstance(operand, bool):
return int(operand)
else:
return operand
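# Illustrative sketch (not part of the original module): for plain Python
# values, transform_boolean_operand_to_numeric maps boolean literals to ints
# and leaves everything else untouched:
#
#     transform_boolean_operand_to_numeric(True)   # 1
#     transform_boolean_operand_to_numeric(False)  # 0
#     transform_boolean_operand_to_numeric(2.5)    # 2.5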
def _as_categorical_type(
index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
assert isinstance(dtype, CategoricalDtype)
if dtype.categories is None:
codes, uniques = index_ops.factorize()
return codes._with_new_scol(
codes.spark.column,
field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
)
else:
categories = dtype.categories
if len(categories) == 0:
scol = SF.lit(-1)
else:
kvs = chain(
*[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]
)
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol.getItem(index_ops.spark.column), SF.lit(-1))
return index_ops._with_new_scol(
scol.cast(spark_type).alias(index_ops._internal.data_fields[0].name),
field=index_ops._internal.data_fields[0].copy(
dtype=dtype, spark_type=spark_type, nullable=False
),
)
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""Cast `index_ops` to BooleanType Spark type, given `dtype`."""
from pyspark.pandas.internal import InternalField
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(BooleanType())
else:
scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(
index_ops.spark.column.cast(BooleanType())
)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
def _as_string_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)
) -> IndexOpsLike:
"""Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
representing null Spark column.
"""
from pyspark.pandas.internal import InternalField
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(StringType())
else:
casted = index_ops.spark.column.cast(StringType())
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
def _as_other_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.
Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
"""
from pyspark.pandas.internal import InternalField
need_pre_process = (
isinstance(dtype, CategoricalDtype)
or isinstance(spark_type, BooleanType)
or isinstance(spark_type, StringType)
)
assert not need_pre_process, "Pre-processing is needed before the type casting."
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import (
DecimalOps,
FractionalExtensionOps,
FractionalOps,
IntegralExtensionOps,
IntegralOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, DecimalType):
return object.__new__(DecimalOps)
elif isinstance(spark_type, FractionalType):
if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:
return object.__new__(FractionalExtensionOps)
else:
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
if extension_dtypes_available and type(dtype) in [
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
]:
return object.__new__(IntegralExtensionOps)
else:
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
if extension_object_dtypes_available and isinstance(dtype, StringDtype):
return object.__new__(StringExtensionOps)
else:
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
return object.__new__(BooleanExtensionOps)
else:
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
elif isinstance(spark_type, NullType):
return object.__new__(NullOps)
elif isinstance(spark_type, UserDefinedType):
return object.__new__(UDTOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)
def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return left.__and__(right)
def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return left.__or__(right)
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col
def prepare(self, col: pd.Series) -> pd.Series:
"""Prepare column when from_pandas."""
return col.replace({np.nan: None})
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
raise TypeError("astype can not be applied to %s." % self.pretty_name)
| apache-2.0 |
anntzer/scikit-learn | sklearn/tests/test_multiclass.py | 5 | 32749 | import numpy as np
import scipy.sparse as sp
import pytest
from re import escape
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import (check_classification_targets,
type_of_target)
from sklearn.utils import (
check_array,
shuffle,
)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.impute import SimpleImputer
from sklearn import svm
from sklearn.exceptions import NotFittedError
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# test predicting without fitting
with pytest.raises(NotFittedError):
ovr.predict([])
# Fail on multioutput data
msg = "Multioutput target data is not supported with label binarization"
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1, 2], [3, 1]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
with pytest.raises(ValueError, match=msg):
X = np.array([[1, 0], [0, 1]])
y = np.array([[1.5, 2.4], [3.1, 0.8]])
OneVsRestClassifier(MultinomialNB()).fit(X, y)
def test_check_classification_targets():
    # Test that check_classification_targets rejects continuous targets. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
with pytest.raises(ValueError, match=msg):
check_classification_targets(y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) == np.mean(iris.target == pred2)
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert np.mean(iris.target == pred) > 0.65
def test_ovr_partial_fit():
# Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert len(ovr.estimators_) == len(np.unique(y))
assert np.mean(y == pred) > 0.65
    # Test when mini-batches don't have all classes
# with SGDClassifier
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
ovr.partial_fit(X[:7], y[:7], np.unique(y))
ovr.partial_fit(X[7:], y[7:])
pred = ovr.predict(X)
ovr1 = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None,
shuffle=False, random_state=0))
pred1 = ovr1.fit(X, y).predict(X)
assert np.mean(pred == y) == np.mean(pred1 == y)
# test partial_fit only exists if estimator has it:
ovr = OneVsRestClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovr_partial_fit_exceptions():
ovr = OneVsRestClassifier(MultinomialNB())
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr.partial_fit(X[:7], y[:7], np.unique(y))
# If a new class that was not in the first call of partial fit is seen
# it should raise ValueError
y1 = [5] + y[7:-1]
msg = r"Mini-batch contains \[.+\] while classes must be subset of \[.+\]"
with pytest.raises(ValueError, match=msg):
ovr.partial_fit(X=X[7:], y=y1)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_
# function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert np.mean(pred == iris.target) > .9
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert clf.multilabel_
assert sp.issparse(Y_pred_sprs)
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf = svm.SVC()
clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label .+ is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert np.unique(y_pred[:, -2:]) == 1
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
msg = r'Label not 1 is present in all training examples'
with pytest.warns(UserWarning, match=msg):
ovr.fit(X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert set(clf.classes_) == classes
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_array_equal(y_pred, ["eggs"])
if hasattr(base_clf, 'decision_function'):
dec = clf.decision_function(X)
assert dec.shape == (5,)
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert 2 == len(probabilities[0])
assert (clf.classes_[np.argmax(probabilities, axis=1)] ==
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert y_pred == 1
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert clf.multilabel_
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert len(ovr.estimators_) == 3
assert ovr.score(iris.data, iris.target) > .9
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert clf.multilabel_
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
assert not hasattr(decision_only, 'predict_proba')
decision_only.fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
assert hasattr(decision_only, 'decision_function')
# Estimator which can get predict_proba enabled after fitting
gs = GridSearchCV(svm.SVC(probability=False),
param_grid={'probability': [True]})
proba_after_fit = OneVsRestClassifier(gs)
assert not hasattr(proba_after_fit, 'predict_proba')
proba_after_fit.fit(X_train, Y_train)
assert hasattr(proba_after_fit, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert not hasattr(decision_only, 'predict_proba')
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # predict assigns to each sample the label with the greatest
    # predicted probability.
pred = Y_proba.argmax(axis=1)
assert not (pred - Y_pred).any()
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0),
LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert shape[0] == n_classes
assert shape[1] == iris.data.shape[1]
# don't densify sparse coefficients
assert (sp.issparse(ovr.estimators_[0].coef_) ==
sp.issparse(ovr.coef_))
# TODO: Remove this test in version 1.1
# when the coef_ attribute is removed
@ignore_warnings(category=FutureWarning)
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovr.coef_
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
msg = "Base estimator doesn't have a coef_ attribute"
with pytest.raises(AttributeError, match=msg):
ovr.coef_
# TODO: Remove this test in version 1.1 when
# the coef_ and intercept_ attributes are removed
def test_ovr_deprecated_coef_intercept():
ovr = OneVsRestClassifier(SVC(kernel="linear"))
ovr = ovr.fit(iris.data, iris.target)
msg = (r"Attribute {0} was deprecated in version 0.24 "
r"and will be removed in 1.1 \(renaming of 0.26\). If you observe "
r"this warning while using RFE or SelectFromModel, "
r"use the importance_getter parameter instead.")
for att in ["coef_", "intercept_"]:
with pytest.warns(FutureWarning, match=msg.format(att)):
getattr(ovr, att)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ovo.predict([])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2
def test_ovo_partial_fit_predict():
temp = datasets.load_iris()
X, y = temp.data, temp.target
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2
assert np.mean(y == pred1) > 0.65
assert_almost_equal(pred1, pred2)
# Test when mini-batches have binary target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:60], y[:60], np.unique(y))
ovo1.partial_fit(X[60:], y[60:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred1, pred2)
assert len(ovo1.estimators_) == len(np.unique(y))
assert np.mean(y == pred1) > 0.65
ovo = OneVsOneClassifier(MultinomialNB())
X = np.random.rand(14, 2)
y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2]
ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4])
ovo.partial_fit(X[7:], y[7:])
pred = ovo.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
# raises error when mini-batch does not have classes from all_classes
ovo = OneVsOneClassifier(MultinomialNB())
error_y = [0, 1, 2, 3, 4, 5, 2]
message_re = escape("Mini-batch contains {0} while "
"it must be subset of {1}".format(np.unique(error_y),
np.unique(y)))
with pytest.raises(ValueError, match=message_re):
ovo.partial_fit(X[:7], error_y, np.unique(y))
# test partial_fit only exists if estimator has it:
ovr = OneVsOneClassifier(SVC())
assert not hasattr(ovr, "partial_fit")
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
# first binary
ovo_clf.fit(iris.data, iris.target == 0)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples,)
# then multi-class
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert decisions.shape == (n_samples, n_classes)
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels, because there are only 3 distinct class pairs and thus
        # 3 distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert set(votes[:, class_idx]).issubset(set([0., 1., 2.]))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert len(np.unique(decisions[:, class_idx])) > 146
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert ovo_prediction[0] == normalized_confidences[0].argmax()
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4,
tol=None))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert ovo_prediction[0] == i % 3
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ovo_one_class():
# Test error for OvO with one class
X = np.eye(4)
y = np.array(['a'] * 4)
ovo = OneVsOneClassifier(LinearSVC())
msg = "when only one class"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ovo_float_y():
# Test that the OvO errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OneVsOneClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
with pytest.raises(NotFittedError):
ecoc.predict([])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert len(ecoc.estimators_) == n_classes * 2
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert best_C in Cs
def test_ecoc_float_y():
# Test that the OCC errors on float targets
X = iris.data
y = iris.data[:, 0]
ovo = OutputCodeClassifier(LinearSVC())
msg = "Unknown label type"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
ovo = OutputCodeClassifier(LinearSVC(), code_size=-1)
msg = "code_size should be greater than 0, got -1"
with pytest.raises(ValueError, match=msg):
ovo.fit(X, y)
def test_ecoc_delegate_sparse_base_estimator():
# Non-regression test for
# https://github.com/scikit-learn/scikit-learn/issues/17218
X, y = iris.data, iris.target
X_sp = sp.csc_matrix(X)
# create an estimator that does not support sparse input
base_estimator = CheckingClassifier(
check_X=check_array,
check_X_params={"ensure_2d": True, "accept_sparse": False},
)
ecoc = OutputCodeClassifier(base_estimator, random_state=0)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.fit(X_sp, y)
ecoc.fit(X, y)
with pytest.raises(TypeError, match="A sparse matrix was passed"):
ecoc.predict(X_sp)
# smoke test to check when sparse input should be supported
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
ecoc.fit(X_sp, y).predict(X_sp)
assert len(ecoc.estimators_) == 4
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
for idx in precomputed_indices:
assert (idx.shape[0] * n_estimators / (n_estimators - 1) ==
linear_kernel.shape[0])
@ignore_warnings(category=FutureWarning)
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._pairwise
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._pairwise
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_tag(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert not ovr_false._get_tags()["pairwise"]
ovr_true = MultiClassClassifier(clf_precomputed)
assert ovr_true._get_tags()["pairwise"]
# TODO: Remove in 1.1
@pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier,
OneVsOneClassifier])
def test_pairwise_deprecated(MultiClassClassifier):
clf_precomputed = svm.SVC(kernel='precomputed')
ov_clf = MultiClassClassifier(clf_precomputed)
msg = r"Attribute _pairwise was deprecated in version 0\.24"
with pytest.warns(FutureWarning, match=msg):
ov_clf._pairwise
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
@pytest.mark.parametrize("MultiClassClassifier",
[OneVsRestClassifier, OneVsOneClassifier])
# FIXME: we should move this test in `estimator_checks` once we are able
# to construct meta-estimator instances
def test_support_missing_values(MultiClassClassifier):
    # smoke test to check that pipeline OvR and OvO classifiers delegate
    # the validation of missing values to
    # the underlying pipeline or classifiers
rng = np.random.RandomState(42)
X, y = iris.data, iris.target
X = np.copy(X) # Copy to avoid that the original data is modified
mask = rng.choice([1, 0], X.shape, p=[.1, .9]).astype(bool)
X[mask] = np.nan
lr = make_pipeline(SimpleImputer(),
LogisticRegression(random_state=rng))
MultiClassClassifier(lr).fit(X, y).score(X, y)
| bsd-3-clause |
rtrwalker/geotecha | geotecha/mathematics/quadrature.py | 1 | 74253 | # geotecha - A software suite for geotechnical engineering
# Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""Numerical integration by quadrature"""
from __future__ import division, print_function
import matplotlib.pyplot
import numpy as np
from scipy import integrate
from scipy.special import jn_zeros
from scipy.special import jn
from matplotlib import pyplot as plt
import functools
import unittest
from numpy.testing import assert_allclose
from numpy.polynomial.polynomial import Polynomial
def gauss_kronrod_abscissae_and_weights(n):
"""Gauss-Kronrod quadrature abscissae and weights
Coarse integral = Sum(f(xi) * wi1)
Fine integral = Sum(f(xi) * wi2)
    For the coarse integral, the unused weights are set to zero.
Parameters
----------
    n : [7, 10, 15, 20, 25, 30]
        Number of Gauss points. The number of Kronrod points will
        automatically be 2 * n + 1.
Returns
-------
xi : 1d array
Abscissae for the quadrature points.
wi1 : 1d array
Weights for the coarse integral.
wi2 : 1d array
Weights for the fine integral
References
----------
.. [2] Holoborodko, Pavel. 2011. 'Gauss-Kronrod Quadrature Nodes and
Weights. November 7.
http://www.advanpix.com/2011/11/07/gauss-kronrod-quadrature-nodes-weights/#Tabulated_Gauss-Kronrod_weights_and_abscissae
"""
    if n not in [7, 10, 15, 20, 25, 30]:
        raise ValueError('n must be one of 7, 10, 15, 20, 25, or 30')
weights = {
7: {
'g': np.array(
[[-0.9491079123427585245261897, 0.1294849661688696932706114],
[ -0.7415311855993944398638648, 0.2797053914892766679014678],
[ -0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.0000000000000000000000000, 0.4179591836734693877551020],
[ 0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.7415311855993944398638648, 0.2797053914892766679014678],
[ 0.9491079123427585245261897, 0.1294849661688696932706114]],
dtype=float),
'k': np.array(
[[-0.9914553711208126392068547, 0.0229353220105292249637320],
[ -0.9491079123427585245261897, 0.0630920926299785532907007],
[ -0.8648644233597690727897128, 0.1047900103222501838398763],
[ -0.7415311855993944398638648, 0.1406532597155259187451896],
[ -0.5860872354676911302941448, 0.1690047266392679028265834],
[ -0.4058451513773971669066064, 0.1903505780647854099132564],
[ -0.2077849550078984676006894, 0.2044329400752988924141620],
[ 0.0000000000000000000000000, 0.2094821410847278280129992],
[ 0.2077849550078984676006894, 0.2044329400752988924141620],
[ 0.4058451513773971669066064, 0.1903505780647854099132564],
[ 0.5860872354676911302941448, 0.1690047266392679028265834],
[ 0.7415311855993944398638648, 0.1406532597155259187451896],
[ 0.8648644233597690727897128, 0.1047900103222501838398763],
[ 0.9491079123427585245261897, 0.0630920926299785532907007],
[ 0.9914553711208126392068547, 0.0229353220105292249637320]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False], dtype=bool)
},
10: {
'g': np.array(
[[-0.9739065285171717200779640, 0.0666713443086881375935688],
[ -0.8650633666889845107320967, 0.1494513491505805931457763],
[ -0.6794095682990244062343274, 0.2190863625159820439955349],
[ -0.4333953941292471907992659, 0.2692667193099963550912269],
[ -0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.4333953941292471907992659, 0.2692667193099963550912269],
[ 0.6794095682990244062343274, 0.2190863625159820439955349],
[ 0.8650633666889845107320967, 0.1494513491505805931457763],
[ 0.9739065285171717200779640, 0.0666713443086881375935688]],
dtype=float),
'k': np.array(
[[-0.9956571630258080807355273, 0.0116946388673718742780644],
[ -0.9739065285171717200779640, 0.0325581623079647274788190],
[ -0.9301574913557082260012072, 0.0547558965743519960313813],
[ -0.8650633666889845107320967, 0.0750396748109199527670431],
[ -0.7808177265864168970637176, 0.0931254545836976055350655],
[ -0.6794095682990244062343274, 0.1093871588022976418992106],
[ -0.5627571346686046833390001, 0.1234919762620658510779581],
[ -0.4333953941292471907992659, 0.1347092173114733259280540],
[ -0.2943928627014601981311266, 0.1427759385770600807970943],
[ -0.1488743389816312108848260, 0.1477391049013384913748415],
[ 0.0000000000000000000000000, 0.1494455540029169056649365],
[ 0.1488743389816312108848260, 0.1477391049013384913748415],
[ 0.2943928627014601981311266, 0.1427759385770600807970943],
[ 0.4333953941292471907992659, 0.1347092173114733259280540],
[ 0.5627571346686046833390001, 0.1234919762620658510779581],
[ 0.6794095682990244062343274, 0.1093871588022976418992106],
[ 0.7808177265864168970637176, 0.0931254545836976055350655],
[ 0.8650633666889845107320967, 0.0750396748109199527670431],
[ 0.9301574913557082260012072, 0.0547558965743519960313813],
[ 0.9739065285171717200779640, 0.0325581623079647274788190],
[ 0.9956571630258080807355273, 0.0116946388673718742780644]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False], dtype=bool)
},
15: {
'g': np.array(
[[-0.9879925180204854284895657, 0.0307532419961172683546284],
[ -0.9372733924007059043077589, 0.0703660474881081247092674],
[ -0.8482065834104272162006483, 0.1071592204671719350118695],
[ -0.7244177313601700474161861, 0.1395706779261543144478048],
[ -0.5709721726085388475372267, 0.1662692058169939335532009],
[ -0.3941513470775633698972074, 0.1861610000155622110268006],
[ -0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.0000000000000000000000000, 0.2025782419255612728806202],
[ 0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.3941513470775633698972074, 0.1861610000155622110268006],
[ 0.5709721726085388475372267, 0.1662692058169939335532009],
[ 0.7244177313601700474161861, 0.1395706779261543144478048],
[ 0.8482065834104272162006483, 0.1071592204671719350118695],
[ 0.9372733924007059043077589, 0.0703660474881081247092674],
[ 0.9879925180204854284895657, 0.0307532419961172683546284]],
dtype=float),
'k': np.array(
[[-0.9980022986933970602851728, 0.0053774798729233489877921],
[ -0.9879925180204854284895657, 0.0150079473293161225383748],
[ -0.9677390756791391342573480, 0.0254608473267153201868740],
[ -0.9372733924007059043077589, 0.0353463607913758462220379],
[ -0.8972645323440819008825097, 0.0445897513247648766082273],
[ -0.8482065834104272162006483, 0.0534815246909280872653431],
[ -0.7904185014424659329676493, 0.0620095678006706402851392],
[ -0.7244177313601700474161861, 0.0698541213187282587095201],
[ -0.6509967412974169705337359, 0.0768496807577203788944328],
[ -0.5709721726085388475372267, 0.0830805028231330210382892],
[ -0.4850818636402396806936557, 0.0885644430562117706472754],
[ -0.3941513470775633698972074, 0.0931265981708253212254869],
[ -0.2991800071531688121667800, 0.0966427269836236785051799],
[ -0.2011940939974345223006283, 0.0991735987217919593323932],
[ -0.1011420669187174990270742, 0.1007698455238755950449467],
[ 0.0000000000000000000000000, 0.1013300070147915490173748],
[ 0.1011420669187174990270742, 0.1007698455238755950449467],
[ 0.2011940939974345223006283, 0.0991735987217919593323932],
[ 0.2991800071531688121667800, 0.0966427269836236785051799],
[ 0.3941513470775633698972074, 0.0931265981708253212254869],
[ 0.4850818636402396806936557, 0.0885644430562117706472754],
[ 0.5709721726085388475372267, 0.0830805028231330210382892],
[ 0.6509967412974169705337359, 0.0768496807577203788944328],
[ 0.7244177313601700474161861, 0.0698541213187282587095201],
[ 0.7904185014424659329676493, 0.0620095678006706402851392],
[ 0.8482065834104272162006483, 0.0534815246909280872653431],
[ 0.8972645323440819008825097, 0.0445897513247648766082273],
[ 0.9372733924007059043077589, 0.0353463607913758462220379],
[ 0.9677390756791391342573480, 0.0254608473267153201868740],
[ 0.9879925180204854284895657, 0.0150079473293161225383748],
[ 0.9980022986933970602851728, 0.0053774798729233489877921]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False], dtype=bool)
},
20: {
'g': np.array(
[[-0.9931285991850949247861224, 0.0176140071391521183118620],
[ -0.9639719272779137912676661, 0.0406014298003869413310400],
[ -0.9122344282513259058677524, 0.0626720483341090635695065],
[ -0.8391169718222188233945291, 0.0832767415767047487247581],
[ -0.7463319064601507926143051, 0.1019301198172404350367501],
[ -0.6360536807265150254528367, 0.1181945319615184173123774],
[ -0.5108670019508270980043641, 0.1316886384491766268984945],
[ -0.3737060887154195606725482, 0.1420961093183820513292983],
[ -0.2277858511416450780804962, 0.1491729864726037467878287],
[ -0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.2277858511416450780804962, 0.1491729864726037467878287],
[ 0.3737060887154195606725482, 0.1420961093183820513292983],
[ 0.5108670019508270980043641, 0.1316886384491766268984945],
[ 0.6360536807265150254528367, 0.1181945319615184173123774],
[ 0.7463319064601507926143051, 0.1019301198172404350367501],
[ 0.8391169718222188233945291, 0.0832767415767047487247581],
[ 0.9122344282513259058677524, 0.0626720483341090635695065],
[ 0.9639719272779137912676661, 0.0406014298003869413310400],
[ 0.9931285991850949247861224, 0.0176140071391521183118620]],
dtype=float),
'k': np.array(
[[-0.9988590315882776638383156, 0.0030735837185205315012183],
[ -0.9931285991850949247861224, 0.0086002698556429421986618],
[ -0.9815078774502502591933430, 0.0146261692569712529837880],
[ -0.9639719272779137912676661, 0.0203883734612665235980102],
[ -0.9408226338317547535199827, 0.0258821336049511588345051],
[ -0.9122344282513259058677524, 0.0312873067770327989585431],
[ -0.8782768112522819760774430, 0.0366001697582007980305572],
[ -0.8391169718222188233945291, 0.0416688733279736862637883],
[ -0.7950414288375511983506388, 0.0464348218674976747202319],
[ -0.7463319064601507926143051, 0.0509445739237286919327077],
[ -0.6932376563347513848054907, 0.0551951053482859947448324],
[ -0.6360536807265150254528367, 0.0591114008806395723749672],
[ -0.5751404468197103153429460, 0.0626532375547811680258701],
[ -0.5108670019508270980043641, 0.0658345971336184221115636],
[ -0.4435931752387251031999922, 0.0686486729285216193456234],
[ -0.3737060887154195606725482, 0.0710544235534440683057904],
[ -0.3016278681149130043205554, 0.0730306903327866674951894],
[ -0.2277858511416450780804962, 0.0745828754004991889865814],
[ -0.1526054652409226755052202, 0.0757044976845566746595428],
[ -0.0765265211334973337546404, 0.0763778676720807367055028],
[ 0.0000000000000000000000000, 0.0766007119179996564450499],
[ 0.0765265211334973337546404, 0.0763778676720807367055028],
[ 0.1526054652409226755052202, 0.0757044976845566746595428],
[ 0.2277858511416450780804962, 0.0745828754004991889865814],
[ 0.3016278681149130043205554, 0.0730306903327866674951894],
[ 0.3737060887154195606725482, 0.0710544235534440683057904],
[ 0.4435931752387251031999922, 0.0686486729285216193456234],
[ 0.5108670019508270980043641, 0.0658345971336184221115636],
[ 0.5751404468197103153429460, 0.0626532375547811680258701],
[ 0.6360536807265150254528367, 0.0591114008806395723749672],
[ 0.6932376563347513848054907, 0.0551951053482859947448324],
[ 0.7463319064601507926143051, 0.0509445739237286919327077],
[ 0.7950414288375511983506388, 0.0464348218674976747202319],
[ 0.8391169718222188233945291, 0.0416688733279736862637883],
[ 0.8782768112522819760774430, 0.0366001697582007980305572],
[ 0.9122344282513259058677524, 0.0312873067770327989585431],
[ 0.9408226338317547535199827, 0.0258821336049511588345051],
[ 0.9639719272779137912676661, 0.0203883734612665235980102],
[ 0.9815078774502502591933430, 0.0146261692569712529837880],
[ 0.9931285991850949247861224, 0.0086002698556429421986618],
[ 0.9988590315882776638383156, 0.0030735837185205315012183]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False], dtype=bool)
},
25: {
'g': np.array(
[[-0.9955569697904980979087849, 0.0113937985010262879479030],
[ -0.9766639214595175114983154, 0.0263549866150321372619018],
[ -0.9429745712289743394140112, 0.0409391567013063126556235],
[ -0.8949919978782753688510420, 0.0549046959758351919259369],
[ -0.8334426287608340014210211, 0.0680383338123569172071872],
[ -0.7592592630373576305772829, 0.0801407003350010180132350],
[ -0.6735663684734683644851206, 0.0910282619829636498114972],
[ -0.5776629302412229677236898, 0.1005359490670506442022069],
[ -0.4730027314457149605221821, 0.1085196244742636531160940],
[ -0.3611723058093878377358217, 0.1148582591457116483393255],
[ -0.2438668837209884320451904, 0.1194557635357847722281781],
[ -0.1228646926107103963873598, 0.1222424429903100416889595],
[ 0.0000000000000000000000000, 0.1231760537267154512039029],
[ 0.1228646926107103963873598, 0.1222424429903100416889595],
[ 0.2438668837209884320451904, 0.1194557635357847722281781],
[ 0.3611723058093878377358217, 0.1148582591457116483393255],
[ 0.4730027314457149605221821, 0.1085196244742636531160940],
[ 0.5776629302412229677236898, 0.1005359490670506442022069],
[ 0.6735663684734683644851206, 0.0910282619829636498114972],
[ 0.7592592630373576305772829, 0.0801407003350010180132350],
[ 0.8334426287608340014210211, 0.0680383338123569172071872],
[ 0.8949919978782753688510420, 0.0549046959758351919259369],
[ 0.9429745712289743394140112, 0.0409391567013063126556235],
[ 0.9766639214595175114983154, 0.0263549866150321372619018],
[ 0.9955569697904980979087849, 0.0113937985010262879479030]],
dtype=float),
'k': np.array(
[[-0.9992621049926098341934575, 0.0019873838923303159265079],
[ -0.9955569697904980979087849, 0.0055619321353567137580402],
[ -0.9880357945340772476373310, 0.0094739733861741516072077],
[ -0.9766639214595175114983154, 0.0132362291955716748136564],
[ -0.9616149864258425124181300, 0.0168478177091282982315167],
[ -0.9429745712289743394140112, 0.0204353711458828354565683],
[ -0.9207471152817015617463461, 0.0240099456069532162200925],
[ -0.8949919978782753688510420, 0.0274753175878517378029485],
[ -0.8658470652932755954489970, 0.0307923001673874888911090],
[ -0.8334426287608340014210211, 0.0340021302743293378367488],
[ -0.7978737979985000594104109, 0.0371162714834155435603306],
[ -0.7592592630373576305772829, 0.0400838255040323820748393],
[ -0.7177664068130843881866541, 0.0428728450201700494768958],
[ -0.6735663684734683644851206, 0.0455029130499217889098706],
[ -0.6268100990103174127881227, 0.0479825371388367139063923],
[ -0.5776629302412229677236898, 0.0502776790807156719633253],
[ -0.5263252843347191825996238, 0.0523628858064074758643667],
[ -0.4730027314457149605221821, 0.0542511298885454901445434],
[ -0.4178853821930377488518144, 0.0559508112204123173082407],
[ -0.3611723058093878377358217, 0.0574371163615678328535827],
[ -0.3030895389311078301674789, 0.0586896800223942079619742],
[ -0.2438668837209884320451904, 0.0597203403241740599790993],
[ -0.1837189394210488920159699, 0.0605394553760458629453603],
[ -0.1228646926107103963873598, 0.0611285097170530483058590],
[ -0.0615444830056850788865464, 0.0614711898714253166615441],
[ 0.0000000000000000000000000, 0.0615808180678329350787598],
[ 0.0615444830056850788865464, 0.0614711898714253166615441],
[ 0.1228646926107103963873598, 0.0611285097170530483058590],
[ 0.1837189394210488920159699, 0.0605394553760458629453603],
[ 0.2438668837209884320451904, 0.0597203403241740599790993],
[ 0.3030895389311078301674789, 0.0586896800223942079619742],
[ 0.3611723058093878377358217, 0.0574371163615678328535827],
[ 0.4178853821930377488518144, 0.0559508112204123173082407],
[ 0.4730027314457149605221821, 0.0542511298885454901445434],
[ 0.5263252843347191825996238, 0.0523628858064074758643667],
[ 0.5776629302412229677236898, 0.0502776790807156719633253],
[ 0.6268100990103174127881227, 0.0479825371388367139063923],
[ 0.6735663684734683644851206, 0.0455029130499217889098706],
[ 0.7177664068130843881866541, 0.0428728450201700494768958],
[ 0.7592592630373576305772829, 0.0400838255040323820748393],
[ 0.7978737979985000594104109, 0.0371162714834155435603306],
[ 0.8334426287608340014210211, 0.0340021302743293378367488],
[ 0.8658470652932755954489970, 0.0307923001673874888911090],
[ 0.8949919978782753688510420, 0.0274753175878517378029485],
[ 0.9207471152817015617463461, 0.0240099456069532162200925],
[ 0.9429745712289743394140112, 0.0204353711458828354565683],
[ 0.9616149864258425124181300, 0.0168478177091282982315167],
[ 0.9766639214595175114983154, 0.0132362291955716748136564],
[ 0.9880357945340772476373310, 0.0094739733861741516072077],
[ 0.9955569697904980979087849, 0.0055619321353567137580402],
[ 0.9992621049926098341934575, 0.0019873838923303159265079]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False], dtype=bool)
},
30: {
'g': np.array(
[[-0.9968934840746495402716301, 0.0079681924961666056154659],
[ -0.9836681232797472099700326, 0.0184664683110909591423021],
[ -0.9600218649683075122168710, 0.0287847078833233693497192],
[ -0.9262000474292743258793243, 0.0387991925696270495968019],
[ -0.8825605357920526815431165, 0.0484026728305940529029381],
[ -0.8295657623827683974428981, 0.0574931562176190664817217],
[ -0.7677774321048261949179773, 0.0659742298821804951281285],
[ -0.6978504947933157969322924, 0.0737559747377052062682439],
[ -0.6205261829892428611404776, 0.0807558952294202153546949],
[ -0.5366241481420198992641698, 0.0868997872010829798023875],
[ -0.4470337695380891767806099, 0.0921225222377861287176327],
[ -0.3527047255308781134710372, 0.0963687371746442596394686],
[ -0.2546369261678898464398051, 0.0995934205867952670627803],
[ -0.1538699136085835469637947, 0.1017623897484055045964290],
[ -0.0514718425553176958330252, 0.1028526528935588403412856],
[ 0.0514718425553176958330252, 0.1028526528935588403412856],
[ 0.1538699136085835469637947, 0.1017623897484055045964290],
[ 0.2546369261678898464398051, 0.0995934205867952670627803],
[ 0.3527047255308781134710372, 0.0963687371746442596394686],
[ 0.4470337695380891767806099, 0.0921225222377861287176327],
[ 0.5366241481420198992641698, 0.0868997872010829798023875],
[ 0.6205261829892428611404776, 0.0807558952294202153546949],
[ 0.6978504947933157969322924, 0.0737559747377052062682439],
[ 0.7677774321048261949179773, 0.0659742298821804951281285],
[ 0.8295657623827683974428981, 0.0574931562176190664817217],
[ 0.8825605357920526815431165, 0.0484026728305940529029381],
[ 0.9262000474292743258793243, 0.0387991925696270495968019],
[ 0.9600218649683075122168710, 0.0287847078833233693497192],
[ 0.9836681232797472099700326, 0.0184664683110909591423021],
[ 0.9968934840746495402716301, 0.0079681924961666056154659]],
dtype=float),
'k': np.array(
[[-0.9994844100504906375713259, 0.0013890136986770076245516],
[ -0.9968934840746495402716301, 0.0038904611270998840512672],
[ -0.9916309968704045948586284, 0.0066307039159312921733198],
[ -0.9836681232797472099700326, 0.0092732796595177634284411],
[ -0.9731163225011262683746939, 0.0118230152534963417422329],
[ -0.9600218649683075122168710, 0.0143697295070458048124514],
[ -0.9443744447485599794158313, 0.0169208891890532726275723],
[ -0.9262000474292743258793243, 0.0194141411939423811734090],
[ -0.9055733076999077985465226, 0.0218280358216091922971675],
[ -0.8825605357920526815431165, 0.0241911620780806013656864],
[ -0.8572052335460610989586585, 0.0265099548823331016106017],
[ -0.8295657623827683974428981, 0.0287540487650412928439788],
[ -0.7997278358218390830136689, 0.0309072575623877624728843],
[ -0.7677774321048261949179773, 0.0329814470574837260318142],
[ -0.7337900624532268047261711, 0.0349793380280600241374997],
[ -0.6978504947933157969322924, 0.0368823646518212292239111],
[ -0.6600610641266269613700537, 0.0386789456247275929503487],
[ -0.6205261829892428611404776, 0.0403745389515359591119953],
[ -0.5793452358263616917560249, 0.0419698102151642461471475],
[ -0.5366241481420198992641698, 0.0434525397013560693168317],
[ -0.4924804678617785749936931, 0.0448148001331626631923556],
[ -0.4470337695380891767806099, 0.0460592382710069881162717],
[ -0.4004012548303943925354762, 0.0471855465692991539452615],
[ -0.3527047255308781134710372, 0.0481858617570871291407795],
[ -0.3040732022736250773726771, 0.0490554345550297788875282],
[ -0.2546369261678898464398051, 0.0497956834270742063578116],
[ -0.2045251166823098914389577, 0.0504059214027823468408931],
[ -0.1538699136085835469637947, 0.0508817958987496064922975],
[ -0.1028069379667370301470968, 0.0512215478492587721706563],
[ -0.0514718425553176958330252, 0.0514261285374590259338629],
[ 0.0000000000000000000000000, 0.0514947294294515675583404],
[ 0.0514718425553176958330252, 0.0514261285374590259338629],
[ 0.1028069379667370301470968, 0.0512215478492587721706563],
[ 0.1538699136085835469637947, 0.0508817958987496064922975],
[ 0.2045251166823098914389577, 0.0504059214027823468408931],
[ 0.2546369261678898464398051, 0.0497956834270742063578116],
[ 0.3040732022736250773726771, 0.0490554345550297788875282],
[ 0.3527047255308781134710372, 0.0481858617570871291407795],
[ 0.4004012548303943925354762, 0.0471855465692991539452615],
[ 0.4470337695380891767806099, 0.0460592382710069881162717],
[ 0.4924804678617785749936931, 0.0448148001331626631923556],
[ 0.5366241481420198992641698, 0.0434525397013560693168317],
[ 0.5793452358263616917560249, 0.0419698102151642461471475],
[ 0.6205261829892428611404776, 0.0403745389515359591119953],
[ 0.6600610641266269613700537, 0.0386789456247275929503487],
[ 0.6978504947933157969322924, 0.0368823646518212292239111],
[ 0.7337900624532268047261711, 0.0349793380280600241374997],
[ 0.7677774321048261949179773, 0.0329814470574837260318142],
[ 0.7997278358218390830136689, 0.0309072575623877624728843],
[ 0.8295657623827683974428981, 0.0287540487650412928439788],
[ 0.8572052335460610989586585, 0.0265099548823331016106017],
[ 0.8825605357920526815431165, 0.0241911620780806013656864],
[ 0.9055733076999077985465226, 0.0218280358216091922971675],
[ 0.9262000474292743258793243, 0.0194141411939423811734090],
[ 0.9443744447485599794158313, 0.0169208891890532726275723],
[ 0.9600218649683075122168710, 0.0143697295070458048124514],
[ 0.9731163225011262683746939, 0.0118230152534963417422329],
[ 0.9836681232797472099700326, 0.0092732796595177634284411],
[ 0.9916309968704045948586284, 0.0066307039159312921733198],
[ 0.9968934840746495402716301, 0.0038904611270998840512672],
[ 0.9994844100504906375713259, 0.0013890136986770076245516]],
dtype=float),
'dup': np.array(
[False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False, True, False,
True, False, True, False, True, False, True, False, True,
False, True, False, True, False, True, False], dtype=bool)
},
}
w = weights[n]
dup=w['dup']
xi = w['k'][:,0]
wi1 = np.zeros_like(xi)
wi1[dup] = w['g'][:, 1]
wi2 = w['k'][:,1]
return xi, wi1, wi2
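# A hedged usage sketch (editor's addition, not part of the original module):
# it assumes numpy is imported as np at module level, as elsewhere in this
# file, and the helper name _demo_gauss_kronrod is hypothetical.
def _demo_gauss_kronrod():
    """Illustrative only: coarse (Gauss) vs fine (Kronrod) estimates on [-1, 1].

    The Gauss weights returned by gauss_kronrod_abscissae_and_weights are
    zero-padded at the Kronrod-only nodes, so both rules can be evaluated
    from a single set of function values.
    """
    xi, wi_gauss, wi_kronrod = gauss_kronrod_abscissae_and_weights(10)
    fx = np.exp(xi)
    coarse = np.sum(fx * wi_gauss)   # 10-point Gauss estimate
    fine = np.sum(fx * wi_kronrod)   # 21-point Kronrod estimate
    # exact integral of exp(x) over [-1, 1] is e - 1/e ~= 2.3504
    return fine, abs(fine - coarse)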
def gauss_legendre_abscissae_and_weights(n):
"""Gauss-Legendre quadrature abscissae and weights
Integral = Sum(f(xi) * wi)
Parameters
----------
n : [2-20, 32, 64, 100]
Number of integration points
Returns
-------
xi, wi : 1d array of len(n)
Abscissae and weights for numerical integration
References
----------
.. [1] Holoborodko, Pavel. 2014. 'Numerical Integration'. Accessed
April 24.
http://www.holoborodko.com/pavel/numerical-methods/numerical-integration/.
"""
if n not in [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
32, 64, 100]:
raise ValueError('n must be 2-20, 32, 64, or 100')
weights = {
2: np.array(
[[-0.5773502691896257645091488, 1.0000000000000000000000000],
[ 0.5773502691896257645091488, 1.0000000000000000000000000]],
dtype=float),
3: np.array(
[[-0.7745966692414833770358531, 0.5555555555555555555555556],
[ 0, 0.8888888888888888888888889],
[ 0.7745966692414833770358531, 0.5555555555555555555555556]],
dtype=float),
4: np.array(
[[-0.8611363115940525752239465, 0.3478548451374538573730639],
[ -0.3399810435848562648026658, 0.6521451548625461426269361],
[ 0.3399810435848562648026658, 0.6521451548625461426269361],
[ 0.8611363115940525752239465, 0.3478548451374538573730639]],
dtype=float),
5: np.array(
[[-0.9061798459386639927976269, 0.2369268850561890875142640],
[ -0.5384693101056830910363144, 0.4786286704993664680412915],
[ 0, 0.5688888888888888888888889],
[ 0.5384693101056830910363144, 0.4786286704993664680412915],
[ 0.9061798459386639927976269, 0.2369268850561890875142640]],
dtype=float),
6: np.array(
[[-0.9324695142031520278123016, 0.1713244923791703450402961],
[ -0.6612093864662645136613996, 0.3607615730481386075698335],
[ -0.2386191860831969086305017, 0.4679139345726910473898703],
[ 0.2386191860831969086305017, 0.4679139345726910473898703],
[ 0.6612093864662645136613996, 0.3607615730481386075698335],
[ 0.9324695142031520278123016, 0.1713244923791703450402961]],
dtype=float),
7: np.array(
[[-0.9491079123427585245261897, 0.1294849661688696932706114],
[ -0.7415311855993944398638648, 0.2797053914892766679014678],
[ -0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0, 0.4179591836734693877551020],
[ 0.4058451513773971669066064, 0.3818300505051189449503698],
[ 0.7415311855993944398638648, 0.2797053914892766679014678],
[ 0.9491079123427585245261897, 0.1294849661688696932706114]],
dtype=float),
8: np.array(
[[-0.9602898564975362316835609, 0.1012285362903762591525314],
[ -0.7966664774136267395915539, 0.2223810344533744705443560],
[ -0.5255324099163289858177390, 0.3137066458778872873379622],
[ -0.1834346424956498049394761, 0.3626837833783619829651504],
[ 0.1834346424956498049394761, 0.3626837833783619829651504],
[ 0.5255324099163289858177390, 0.3137066458778872873379622],
[ 0.7966664774136267395915539, 0.2223810344533744705443560],
[ 0.9602898564975362316835609, 0.1012285362903762591525314]],
dtype=float),
9: np.array(
[[-0.9681602395076260898355762, 0.0812743883615744119718922],
[ -0.8360311073266357942994298, 0.1806481606948574040584720],
[ -0.6133714327005903973087020, 0.2606106964029354623187429],
[ -0.3242534234038089290385380, 0.3123470770400028400686304],
[ 0, 0.3302393550012597631645251],
[ 0.3242534234038089290385380, 0.3123470770400028400686304],
[ 0.6133714327005903973087020, 0.2606106964029354623187429],
[ 0.8360311073266357942994298, 0.1806481606948574040584720],
[ 0.9681602395076260898355762, 0.0812743883615744119718922]],
dtype=float),
10: np.array(
[[-0.9739065285171717200779640, 0.0666713443086881375935688],
[ -0.8650633666889845107320967, 0.1494513491505805931457763],
[ -0.6794095682990244062343274, 0.2190863625159820439955349],
[ -0.4333953941292471907992659, 0.2692667193099963550912269],
[ -0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.1488743389816312108848260, 0.2955242247147528701738930],
[ 0.4333953941292471907992659, 0.2692667193099963550912269],
[ 0.6794095682990244062343274, 0.2190863625159820439955349],
[ 0.8650633666889845107320967, 0.1494513491505805931457763],
[ 0.9739065285171717200779640, 0.0666713443086881375935688]],
dtype=float),
11: np.array(
[[-0.9782286581460569928039380, 0.0556685671161736664827537],
[ -0.8870625997680952990751578, 0.1255803694649046246346943],
[ -0.7301520055740493240934163, 0.1862902109277342514260976],
[ -0.5190961292068118159257257, 0.2331937645919904799185237],
[ -0.2695431559523449723315320, 0.2628045445102466621806889],
[ 0, 0.2729250867779006307144835],
[ 0.2695431559523449723315320, 0.2628045445102466621806889],
[ 0.5190961292068118159257257, 0.2331937645919904799185237],
[ 0.7301520055740493240934163, 0.1862902109277342514260976],
[ 0.8870625997680952990751578, 0.1255803694649046246346943],
[ 0.9782286581460569928039380, 0.0556685671161736664827537]],
dtype=float),
12: np.array(
[[-0.9815606342467192506905491, 0.0471753363865118271946160],
[ -0.9041172563704748566784659, 0.1069393259953184309602547],
[ -0.7699026741943046870368938, 0.1600783285433462263346525],
[ -0.5873179542866174472967024, 0.2031674267230659217490645],
[ -0.3678314989981801937526915, 0.2334925365383548087608499],
[ -0.1252334085114689154724414, 0.2491470458134027850005624],
[ 0.1252334085114689154724414, 0.2491470458134027850005624],
[ 0.3678314989981801937526915, 0.2334925365383548087608499],
[ 0.5873179542866174472967024, 0.2031674267230659217490645],
[ 0.7699026741943046870368938, 0.1600783285433462263346525],
[ 0.9041172563704748566784659, 0.1069393259953184309602547],
[ 0.9815606342467192506905491, 0.0471753363865118271946160]],
dtype=float),
13: np.array(
[[-0.9841830547185881494728294, 0.0404840047653158795200216],
[ -0.9175983992229779652065478, 0.0921214998377284479144218],
[ -0.8015780907333099127942065, 0.1388735102197872384636018],
[ -0.6423493394403402206439846, 0.1781459807619457382800467],
[ -0.4484927510364468528779129, 0.2078160475368885023125232],
[ -0.2304583159551347940655281, 0.2262831802628972384120902],
[ 0, 0.2325515532308739101945895],
[ 0.2304583159551347940655281, 0.2262831802628972384120902],
[ 0.4484927510364468528779129, 0.2078160475368885023125232],
[ 0.6423493394403402206439846, 0.1781459807619457382800467],
[ 0.8015780907333099127942065, 0.1388735102197872384636018],
[ 0.9175983992229779652065478, 0.0921214998377284479144218],
[ 0.9841830547185881494728294, 0.0404840047653158795200216]],
dtype=float),
14: np.array(
[[-0.9862838086968123388415973, 0.0351194603317518630318329],
[ -0.9284348836635735173363911, 0.0801580871597602098056333],
[ -0.8272013150697649931897947, 0.1215185706879031846894148],
[ -0.6872929048116854701480198, 0.1572031671581935345696019],
[ -0.5152486363581540919652907, 0.1855383974779378137417166],
[ -0.3191123689278897604356718, 0.2051984637212956039659241],
[ -0.1080549487073436620662447, 0.2152638534631577901958764],
[ 0.1080549487073436620662447, 0.2152638534631577901958764],
[ 0.3191123689278897604356718, 0.2051984637212956039659241],
[ 0.5152486363581540919652907, 0.1855383974779378137417166],
[ 0.6872929048116854701480198, 0.1572031671581935345696019],
[ 0.8272013150697649931897947, 0.1215185706879031846894148],
[ 0.9284348836635735173363911, 0.0801580871597602098056333],
[ 0.9862838086968123388415973, 0.0351194603317518630318329]],
dtype=float),
15: np.array(
[[-0.9879925180204854284895657, 0.0307532419961172683546284],
[ -0.9372733924007059043077589, 0.0703660474881081247092674],
[ -0.8482065834104272162006483, 0.1071592204671719350118695],
[ -0.7244177313601700474161861, 0.1395706779261543144478048],
[ -0.5709721726085388475372267, 0.1662692058169939335532009],
[ -0.3941513470775633698972074, 0.1861610000155622110268006],
[ -0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0, 0.2025782419255612728806202],
[ 0.2011940939974345223006283, 0.1984314853271115764561183],
[ 0.3941513470775633698972074, 0.1861610000155622110268006],
[ 0.5709721726085388475372267, 0.1662692058169939335532009],
[ 0.7244177313601700474161861, 0.1395706779261543144478048],
[ 0.8482065834104272162006483, 0.1071592204671719350118695],
[ 0.9372733924007059043077589, 0.0703660474881081247092674],
[ 0.9879925180204854284895657, 0.0307532419961172683546284]],
dtype=float),
16: np.array(
[[-0.9894009349916499325961542, 0.0271524594117540948517806],
[ -0.9445750230732325760779884, 0.0622535239386478928628438],
[ -0.8656312023878317438804679, 0.0951585116824927848099251],
[ -0.7554044083550030338951012, 0.1246289712555338720524763],
[ -0.6178762444026437484466718, 0.1495959888165767320815017],
[ -0.4580167776572273863424194, 0.1691565193950025381893121],
[ -0.2816035507792589132304605, 0.1826034150449235888667637],
[ -0.0950125098376374401853193, 0.1894506104550684962853967],
[ 0.0950125098376374401853193, 0.1894506104550684962853967],
[ 0.2816035507792589132304605, 0.1826034150449235888667637],
[ 0.4580167776572273863424194, 0.1691565193950025381893121],
[ 0.6178762444026437484466718, 0.1495959888165767320815017],
[ 0.7554044083550030338951012, 0.1246289712555338720524763],
[ 0.8656312023878317438804679, 0.0951585116824927848099251],
[ 0.9445750230732325760779884, 0.0622535239386478928628438],
[ 0.9894009349916499325961542, 0.0271524594117540948517806]],
dtype=float),
17: np.array(
[[-0.9905754753144173356754340, 0.0241483028685479319601100],
[ -0.9506755217687677612227170, 0.0554595293739872011294402],
[ -0.8802391537269859021229557, 0.0850361483171791808835354],
[ -0.7815140038968014069252301, 0.1118838471934039710947884],
[ -0.6576711592166907658503022, 0.1351363684685254732863200],
[ -0.5126905370864769678862466, 0.1540457610768102880814316],
[ -0.3512317634538763152971855, 0.1680041021564500445099707],
[ -0.1784841814958478558506775, 0.1765627053669926463252710],
[ 0, 0.1794464703562065254582656],
[ 0.1784841814958478558506775, 0.1765627053669926463252710],
[ 0.3512317634538763152971855, 0.1680041021564500445099707],
[ 0.5126905370864769678862466, 0.1540457610768102880814316],
[ 0.6576711592166907658503022, 0.1351363684685254732863200],
[ 0.7815140038968014069252301, 0.1118838471934039710947884],
[ 0.8802391537269859021229557, 0.0850361483171791808835354],
[ 0.9506755217687677612227170, 0.0554595293739872011294402],
[ 0.9905754753144173356754340, 0.0241483028685479319601100]],
dtype=float),
18: np.array(
[[-0.9915651684209309467300160, 0.0216160135264833103133427],
[ -0.9558239495713977551811959, 0.0497145488949697964533349],
[ -0.8926024664975557392060606, 0.0764257302548890565291297],
[ -0.8037049589725231156824175, 0.1009420441062871655628140],
[ -0.6916870430603532078748911, 0.1225552067114784601845191],
[ -0.5597708310739475346078715, 0.1406429146706506512047313],
[ -0.4117511614628426460359318, 0.1546846751262652449254180],
[ -0.2518862256915055095889729, 0.1642764837458327229860538],
[ -0.0847750130417353012422619, 0.1691423829631435918406565],
[ 0.0847750130417353012422619, 0.1691423829631435918406565],
[ 0.2518862256915055095889729, 0.1642764837458327229860538],
[ 0.4117511614628426460359318, 0.1546846751262652449254180],
[ 0.5597708310739475346078715, 0.1406429146706506512047313],
[ 0.6916870430603532078748911, 0.1225552067114784601845191],
[ 0.8037049589725231156824175, 0.1009420441062871655628140],
[ 0.8926024664975557392060606, 0.0764257302548890565291297],
[ 0.9558239495713977551811959, 0.0497145488949697964533349],
[ 0.9915651684209309467300160, 0.0216160135264833103133427]],
dtype=float),
19: np.array(
[[-0.9924068438435844031890177, 0.0194617882297264770363120],
[ -0.9602081521348300308527788, 0.0448142267656996003328382],
[ -0.9031559036148179016426609, 0.0690445427376412265807083],
[ -0.8227146565371428249789225, 0.0914900216224499994644621],
[ -0.7209661773352293786170959, 0.1115666455473339947160239],
[ -0.6005453046616810234696382, 0.1287539625393362276755158],
[ -0.4645707413759609457172671, 0.1426067021736066117757461],
[ -0.3165640999636298319901173, 0.1527660420658596667788554],
[ -0.1603586456402253758680961, 0.1589688433939543476499564],
[ 0, 0.1610544498487836959791636],
[ 0.1603586456402253758680961, 0.1589688433939543476499564],
[ 0.3165640999636298319901173, 0.1527660420658596667788554],
[ 0.4645707413759609457172671, 0.1426067021736066117757461],
[ 0.6005453046616810234696382, 0.1287539625393362276755158],
[ 0.7209661773352293786170959, 0.1115666455473339947160239],
[ 0.8227146565371428249789225, 0.0914900216224499994644621],
[ 0.9031559036148179016426609, 0.0690445427376412265807083],
[ 0.9602081521348300308527788, 0.0448142267656996003328382],
[ 0.9924068438435844031890177, 0.0194617882297264770363120]],
dtype=float),
20: np.array(
[[-0.9931285991850949247861224, 0.0176140071391521183118620],
[ -0.9639719272779137912676661, 0.0406014298003869413310400],
[ -0.9122344282513259058677524, 0.0626720483341090635695065],
[ -0.8391169718222188233945291, 0.0832767415767047487247581],
[ -0.7463319064601507926143051, 0.1019301198172404350367501],
[ -0.6360536807265150254528367, 0.1181945319615184173123774],
[ -0.5108670019508270980043641, 0.1316886384491766268984945],
[ -0.3737060887154195606725482, 0.1420961093183820513292983],
[ -0.2277858511416450780804962, 0.1491729864726037467878287],
[ -0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.0765265211334973337546404, 0.1527533871307258506980843],
[ 0.2277858511416450780804962, 0.1491729864726037467878287],
[ 0.3737060887154195606725482, 0.1420961093183820513292983],
[ 0.5108670019508270980043641, 0.1316886384491766268984945],
[ 0.6360536807265150254528367, 0.1181945319615184173123774],
[ 0.7463319064601507926143051, 0.1019301198172404350367501],
[ 0.8391169718222188233945291, 0.0832767415767047487247581],
[ 0.9122344282513259058677524, 0.0626720483341090635695065],
[ 0.9639719272779137912676661, 0.0406014298003869413310400],
[ 0.9931285991850949247861224, 0.0176140071391521183118620]],
dtype=float),
32: np.array(
[[-0.9972638618494815635449811, 0.0070186100094700966004071],
[ -0.9856115115452683354001750, 0.0162743947309056706051706],
[ -0.9647622555875064307738119, 0.0253920653092620594557526],
[ -0.9349060759377396891709191, 0.0342738629130214331026877],
[ -0.8963211557660521239653072, 0.0428358980222266806568786],
[ -0.8493676137325699701336930, 0.0509980592623761761961632],
[ -0.7944837959679424069630973, 0.0586840934785355471452836],
[ -0.7321821187402896803874267, 0.0658222227763618468376501],
[ -0.6630442669302152009751152, 0.0723457941088485062253994],
[ -0.5877157572407623290407455, 0.0781938957870703064717409],
[ -0.5068999089322293900237475, 0.0833119242269467552221991],
[ -0.4213512761306353453641194, 0.0876520930044038111427715],
[ -0.3318686022821276497799168, 0.0911738786957638847128686],
[ -0.2392873622521370745446032, 0.0938443990808045656391802],
[ -0.1444719615827964934851864, 0.0956387200792748594190820],
[ -0.0483076656877383162348126, 0.0965400885147278005667648],
[ 0.0483076656877383162348126, 0.0965400885147278005667648],
[ 0.1444719615827964934851864, 0.0956387200792748594190820],
[ 0.2392873622521370745446032, 0.0938443990808045656391802],
[ 0.3318686022821276497799168, 0.0911738786957638847128686],
[ 0.4213512761306353453641194, 0.0876520930044038111427715],
[ 0.5068999089322293900237475, 0.0833119242269467552221991],
[ 0.5877157572407623290407455, 0.0781938957870703064717409],
[ 0.6630442669302152009751152, 0.0723457941088485062253994],
[ 0.7321821187402896803874267, 0.0658222227763618468376501],
[ 0.7944837959679424069630973, 0.0586840934785355471452836],
[ 0.8493676137325699701336930, 0.0509980592623761761961632],
[ 0.8963211557660521239653072, 0.0428358980222266806568786],
[ 0.9349060759377396891709191, 0.0342738629130214331026877],
[ 0.9647622555875064307738119, 0.0253920653092620594557526],
[ 0.9856115115452683354001750, 0.0162743947309056706051706],
[ 0.9972638618494815635449811, 0.0070186100094700966004071]],
dtype=float),
64: np.array(
[[-0.9993050417357721394569056, 0.0017832807216964329472961],
[ -0.9963401167719552793469245, 0.0041470332605624676352875],
[ -0.9910133714767443207393824, 0.0065044579689783628561174],
[ -0.9833362538846259569312993, 0.0088467598263639477230309],
[ -0.9733268277899109637418535, 0.0111681394601311288185905],
[ -0.9610087996520537189186141, 0.0134630478967186425980608],
[ -0.9464113748584028160624815, 0.0157260304760247193219660],
[ -0.9295691721319395758214902, 0.0179517157756973430850453],
[ -0.9105221370785028057563807, 0.0201348231535302093723403],
[ -0.8893154459951141058534040, 0.0222701738083832541592983],
[ -0.8659993981540928197607834, 0.0243527025687108733381776],
[ -0.8406292962525803627516915, 0.0263774697150546586716918],
[ -0.8132653151227975597419233, 0.0283396726142594832275113],
[ -0.7839723589433414076102205, 0.0302346570724024788679741],
[ -0.7528199072605318966118638, 0.0320579283548515535854675],
[ -0.7198818501716108268489402, 0.0338051618371416093915655],
[ -0.6852363130542332425635584, 0.0354722132568823838106931],
[ -0.6489654712546573398577612, 0.0370551285402400460404151],
[ -0.6111553551723932502488530, 0.0385501531786156291289625],
[ -0.5718956462026340342838781, 0.0399537411327203413866569],
[ -0.5312794640198945456580139, 0.0412625632426235286101563],
[ -0.4894031457070529574785263, 0.0424735151236535890073398],
[ -0.4463660172534640879849477, 0.0435837245293234533768279],
[ -0.4022701579639916036957668, 0.0445905581637565630601347],
[ -0.3572201583376681159504426, 0.0454916279274181444797710],
[ -0.3113228719902109561575127, 0.0462847965813144172959532],
[ -0.2646871622087674163739642, 0.0469681828162100173253263],
[ -0.2174236437400070841496487, 0.0475401657148303086622822],
[ -0.1696444204239928180373136, 0.0479993885964583077281262],
[ -0.1214628192961205544703765, 0.0483447622348029571697695],
[ -0.0729931217877990394495429, 0.0485754674415034269347991],
[ -0.0243502926634244325089558, 0.0486909570091397203833654],
[ 0.0243502926634244325089558, 0.0486909570091397203833654],
[ 0.0729931217877990394495429, 0.0485754674415034269347991],
[ 0.1214628192961205544703765, 0.0483447622348029571697695],
[ 0.1696444204239928180373136, 0.0479993885964583077281262],
[ 0.2174236437400070841496487, 0.0475401657148303086622822],
[ 0.2646871622087674163739642, 0.0469681828162100173253263],
[ 0.3113228719902109561575127, 0.0462847965813144172959532],
[ 0.3572201583376681159504426, 0.0454916279274181444797710],
[ 0.4022701579639916036957668, 0.0445905581637565630601347],
[ 0.4463660172534640879849477, 0.0435837245293234533768279],
[ 0.4894031457070529574785263, 0.0424735151236535890073398],
[ 0.5312794640198945456580139, 0.0412625632426235286101563],
[ 0.5718956462026340342838781, 0.0399537411327203413866569],
[ 0.6111553551723932502488530, 0.0385501531786156291289625],
[ 0.6489654712546573398577612, 0.0370551285402400460404151],
[ 0.6852363130542332425635584, 0.0354722132568823838106931],
[ 0.7198818501716108268489402, 0.0338051618371416093915655],
[ 0.7528199072605318966118638, 0.0320579283548515535854675],
[ 0.7839723589433414076102205, 0.0302346570724024788679741],
[ 0.8132653151227975597419233, 0.0283396726142594832275113],
[ 0.8406292962525803627516915, 0.0263774697150546586716918],
[ 0.8659993981540928197607834, 0.0243527025687108733381776],
[ 0.8893154459951141058534040, 0.0222701738083832541592983],
[ 0.9105221370785028057563807, 0.0201348231535302093723403],
[ 0.9295691721319395758214902, 0.0179517157756973430850453],
[ 0.9464113748584028160624815, 0.0157260304760247193219660],
[ 0.9610087996520537189186141, 0.0134630478967186425980608],
[ 0.9733268277899109637418535, 0.0111681394601311288185905],
[ 0.9833362538846259569312993, 0.0088467598263639477230309],
[ 0.9910133714767443207393824, 0.0065044579689783628561174],
[ 0.9963401167719552793469245, 0.0041470332605624676352875],
[ 0.9993050417357721394569056, 0.0017832807216964329472961]],
dtype=float),
100: np.array(
[[-0.9997137267734412336782285, 0.0007346344905056717304063],
[ -0.9984919506395958184001634, 0.0017093926535181052395294],
[ -0.9962951347331251491861317, 0.0026839253715534824194396],
[ -0.9931249370374434596520099, 0.0036559612013263751823425],
[ -0.9889843952429917480044187, 0.0046244500634221193510958],
[ -0.9838775407060570154961002, 0.0055884280038655151572119],
[ -0.9778093584869182885537811, 0.0065469484508453227641521],
[ -0.9707857757637063319308979, 0.0074990732554647115788287],
[ -0.9628136542558155272936593, 0.0084438714696689714026208],
[ -0.9539007829254917428493369, 0.0093804196536944579514182],
[ -0.9440558701362559779627747, 0.0103078025748689695857821],
[ -0.9332885350430795459243337, 0.0112251140231859771172216],
[ -0.9216092981453339526669513, 0.0121314576629794974077448],
[ -0.9090295709825296904671263, 0.0130259478929715422855586],
[ -0.8955616449707269866985210, 0.0139077107037187726879541],
[ -0.8812186793850184155733168, 0.0147758845274413017688800],
[ -0.8660146884971646234107400, 0.0156296210775460027239369],
[ -0.8499645278795912842933626, 0.0164680861761452126431050],
[ -0.8330838798884008235429158, 0.0172904605683235824393442],
[ -0.8153892383391762543939888, 0.0180959407221281166643908],
[ -0.7968978923903144763895729, 0.0188837396133749045529412],
[ -0.7776279096494954756275514, 0.0196530874944353058653815],
[ -0.7575981185197071760356680, 0.0204032326462094327668389],
[ -0.7368280898020207055124277, 0.0211334421125276415426723],
[ -0.7153381175730564464599671, 0.0218430024162473863139537],
[ -0.6931491993558019659486479, 0.0225312202563362727017970],
[ -0.6702830156031410158025870, 0.0231974231852541216224889],
[ -0.6467619085141292798326303, 0.0238409602659682059625604],
[ -0.6226088602037077716041908, 0.0244612027079570527199750],
[ -0.5978474702471787212648065, 0.0250575444815795897037642],
[ -0.5725019326213811913168704, 0.0256294029102081160756420],
[ -0.5465970120650941674679943, 0.0261762192395456763423087],
[ -0.5201580198817630566468157, 0.0266974591835709626603847],
[ -0.4932107892081909335693088, 0.0271926134465768801364916],
[ -0.4657816497733580422492166, 0.0276611982207923882942042],
[ -0.4378974021720315131089780, 0.0281027556591011733176483],
[ -0.4095852916783015425288684, 0.0285168543223950979909368],
[ -0.3808729816246299567633625, 0.0289030896011252031348762],
[ -0.3517885263724217209723438, 0.0292610841106382766201190],
[ -0.3223603439005291517224766, 0.0295904880599126425117545],
[ -0.2926171880384719647375559, 0.0298909795933328309168368],
[ -0.2625881203715034791689293, 0.0301622651051691449190687],
[ -0.2323024818449739696495100, 0.0304040795264548200165079],
[ -0.2017898640957359972360489, 0.0306161865839804484964594],
[ -0.1710800805386032748875324, 0.0307983790311525904277139],
[ -0.1402031372361139732075146, 0.0309504788504909882340635],
[ -0.1091892035800611150034260, 0.0310723374275665165878102],
[ -0.0780685828134366366948174, 0.0311638356962099067838183],
[ -0.0468716824215916316149239, 0.0312248842548493577323765],
[ -0.0156289844215430828722167, 0.0312554234538633569476425],
[ 0.0156289844215430828722167, 0.0312554234538633569476425],
[ 0.0468716824215916316149239, 0.0312248842548493577323765],
[ 0.0780685828134366366948174, 0.0311638356962099067838183],
[ 0.1091892035800611150034260, 0.0310723374275665165878102],
[ 0.1402031372361139732075146, 0.0309504788504909882340635],
[ 0.1710800805386032748875324, 0.0307983790311525904277139],
[ 0.2017898640957359972360489, 0.0306161865839804484964594],
[ 0.2323024818449739696495100, 0.0304040795264548200165079],
[ 0.2625881203715034791689293, 0.0301622651051691449190687],
[ 0.2926171880384719647375559, 0.0298909795933328309168368],
[ 0.3223603439005291517224766, 0.0295904880599126425117545],
[ 0.3517885263724217209723438, 0.0292610841106382766201190],
[ 0.3808729816246299567633625, 0.0289030896011252031348762],
[ 0.4095852916783015425288684, 0.0285168543223950979909368],
[ 0.4378974021720315131089780, 0.0281027556591011733176483],
[ 0.4657816497733580422492166, 0.0276611982207923882942042],
[ 0.4932107892081909335693088, 0.0271926134465768801364916],
[ 0.5201580198817630566468157, 0.0266974591835709626603847],
[ 0.5465970120650941674679943, 0.0261762192395456763423087],
[ 0.5725019326213811913168704, 0.0256294029102081160756420],
[ 0.5978474702471787212648065, 0.0250575444815795897037642],
[ 0.6226088602037077716041908, 0.0244612027079570527199750],
[ 0.6467619085141292798326303, 0.0238409602659682059625604],
[ 0.6702830156031410158025870, 0.0231974231852541216224889],
[ 0.6931491993558019659486479, 0.0225312202563362727017970],
[ 0.7153381175730564464599671, 0.0218430024162473863139537],
[ 0.7368280898020207055124277, 0.0211334421125276415426723],
[ 0.7575981185197071760356680, 0.0204032326462094327668389],
[ 0.7776279096494954756275514, 0.0196530874944353058653815],
[ 0.7968978923903144763895729, 0.0188837396133749045529412],
[ 0.8153892383391762543939888, 0.0180959407221281166643908],
[ 0.8330838798884008235429158, 0.0172904605683235824393442],
[ 0.8499645278795912842933626, 0.0164680861761452126431050],
[ 0.8660146884971646234107400, 0.0156296210775460027239369],
[ 0.8812186793850184155733168, 0.0147758845274413017688800],
[ 0.8955616449707269866985210, 0.0139077107037187726879541],
[ 0.9090295709825296904671263, 0.0130259478929715422855586],
[ 0.9216092981453339526669513, 0.0121314576629794974077448],
[ 0.9332885350430795459243337, 0.0112251140231859771172216],
[ 0.9440558701362559779627747, 0.0103078025748689695857821],
[ 0.9539007829254917428493369, 0.0093804196536944579514182],
[ 0.9628136542558155272936593, 0.0084438714696689714026208],
[ 0.9707857757637063319308979, 0.0074990732554647115788287],
[ 0.9778093584869182885537811, 0.0065469484508453227641521],
[ 0.9838775407060570154961002, 0.0055884280038655151572119],
[ 0.9889843952429917480044187, 0.0046244500634221193510958],
[ 0.9931249370374434596520099, 0.0036559612013263751823425],
[ 0.9962951347331251491861317, 0.0026839253715534824194396],
[ 0.9984919506395958184001634, 0.0017093926535181052395294],
[ 0.9997137267734412336782285, 0.0007346344905056717304063]],
dtype=float),
}
return weights[n][:,0], weights[n][:,1]
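# A hedged usage sketch (editor's addition, not part of the original module):
# it assumes numpy is imported as np at module level and the helper name
# _demo_gauss_legendre is hypothetical.
def _demo_gauss_legendre():
    """Illustrative only: 10-point Gauss-Legendre estimate of cos(x) on [-1, 1]."""
    xi, wi = gauss_legendre_abscissae_and_weights(10)
    # exact value is 2*sin(1) ~= 1.6829419696
    return np.sum(np.cos(xi) * wi)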
def shanks_table(seq, table=None, randomized=False):
r'''Copied from sympy.mpmath.mpmath.calculus.extrapolation.py
This shanks function is taken almost verbatim (minus an initial ctx
argument???) from sympy.mpmath.mpmath.calculus.extrapolation.py:
- http://docs.sympy.org/dev/modules/mpmath/calculus/sums_limits.html#mpmath.shanks
- https://github.com/sympy/sympy/blob/master/sympy/mpmath/calculus/extrapolation.py
mpmath is BSD license
Notes
-----
Given a list ``seq`` of the first `N` elements of a slowly
convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
transformation often provides strong convergence acceleration,
especially if the sequence is oscillating.
The iterated Shanks transformation is computed using the Wynn
epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
epsilon table generated by Wynn's algorithm, which can be read
off as follows:
- The table is a list of lists forming a lower triangular matrix,
where higher row and column indices correspond to more accurate
values.
- The columns with even index hold dummy entries (required for the
computation) and the columns with odd index hold the actual
extrapolates.
- The last element in the last row is typically the most
accurate estimate of the limit.
- The difference to the third last element in the last row
provides an estimate of the approximation error.
- The magnitude of the second last element provides an estimate
of the numerical accuracy lost to cancellation.
For convenience, the extrapolation is stopped at an odd index
so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
limit.
Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
This can be used to efficiently extend a previous computation after
new elements have been appended to the sequence. The table will
then be updated in-place.
The Shanks transformation:
The Shanks transformation is defined as follows (see [2]): given
the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
given by
.. math ::
S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
The Shanks transformation gives the exact limit `A_{\infty}` in a
single step if `A_k = A + a q^k`. Note in particular that it
extrapolates the exact sum of a geometric series in a single step.
Applying the Shanks transformation once often improves convergence
substantially for an arbitrary sequence, but the optimal effect is
obtained by applying it iteratively:
`S(S(A_k)), S(S(S(A_k))), \ldots`.
Wynn's epsilon algorithm provides an efficient way to generate
the table of iterated Shanks transformations. It reduces the
computation of each element to essentially a single division, at
the cost of requiring dummy elements in the table. See [1] for
details.
Precision issues:
Due to cancellation effects, the sequence must typically be
computed at a much higher precision than the target accuracy
of the extrapolation.
If the Shanks transformation converges to the exact limit (such
as if the sequence is a geometric series), then a division by
zero occurs. By default, :func:`~mpmath.shanks` handles this case by
terminating the iteration and returning the table it has
generated so far. With *randomized=True*, it will instead
replace the zero by a pseudorandom number close to zero.
(TODO: find a better solution to this problem.)
Examples (truncated from original)
We illustrate by applying Shanks transformation to the Leibniz
series for `\pi`:
>>> S = [4*sum((-1)**n/(2*n+1) for n in range(m))
... for m in range(1,30)]
>>>
>>> T = shanks_table(S[:7])
>>> for row in T:
... print('['+', '.join(['{:.6g}'.format(v) for v in row])+']')
...
[-0.75]
[1.25, 3.16667]
[-1.75, 3.13333, -28.75]
[2.25, 3.14524, 82.25, 3.14234]
[-2.75, 3.13968, -177.75, 3.14139, -969.938]
[3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
'''
if len(seq) < 2:
raise ValueError("seq should be of minimum length 2")
if table:
START = len(table)
else:
START = 0
table = []
STOP = len(seq) - 1
if STOP & 1:
STOP -= 1
one = 1.0#ctx.one
eps = np.spacing(1)#+ctx.eps
if randomized:
from random import Random
rnd = Random()
rnd.seed(START)
for i in range(START, STOP):
row = []
for j in range(i+1):
if j == 0:
a, b = 0, seq[i+1]-seq[i]
else:
if j == 1:
a = seq[i]
else:
a = table[i-1][j-2]
b = row[j-1] - table[i-1][j-1]
if not b:
if randomized:
b = rnd.getrandbits(10)*eps
elif i & 1:
return table[:-1]
else:
return table
row.append(a + one/b)
table.append(row)
return table
def shanks(seq, ind=0):
"""Iterated Shanks transformation to accelerate series convergence
Though normally applied to a 1d array, `shanks` will actually operate on
the last dimension of seq which allows for multi-dimensional arrays. e.g.
for 2d data each row of `seq` would be a separate sequence.
Parameters
----------
seq : list or array
If seq is a numpy array then its elements will be modified in-place.
If seq is a list then seq will not be modified.
ind : int, optional
Start index for extrapolation. Can be negative, e.g. ind=-5
will extrapolate based on the last 5 elements of the `seq`.
default ind=0 i.e. use all elements.
Returns
-------
out : array with 1 dim less than `seq`, or float if seq is only 1d.
Extrapolated value. If `seq` is a numpy array then due to in-place
modification the result will also be in seq[..., -1].
See Also
--------
shanks_table : Copy of sympy.mpmath.calculus.extrapolation.shanks
Provides the whole epsilon table and error estimates.
numpy.apply_along_axis : If your sequence is not in the last dimension of
an array then use np.apply_along_axis to apply it along a specific
axis.
Notes
-----
I think this will also work on multi-dimensional data. The shanks
extrapolation will be performed on the last dimension of the data.
So for 2d data each row is a separate sequence.
For sequence:
.. math:: A=\\sum_{m=0}^{\\infty}a_m
The partial sum is first defined as:
.. math:: A_n=\\sum_{m=0}^{n}a_m
This forms a new sequence, the convergence of which can be sped up by
repeated use of:
.. math:: S(A_n)=\\frac{A_{n+1}A_{n-1}-A_n^2}{A_{n+1}-2A_n+A_{n-1}}
"""
seq = np.atleast_1d(seq)
if ind is None:
return +seq[..., -1]
if ind < 0:
ind = seq.shape[-1] + ind
ind = max(ind, 0)
for i in range(ind, seq.shape[-1] - 2, 2):
denom = (seq[..., i + 2:] - 2 * seq[..., i + 1: -1] + seq[..., i:-2])
if np.any(denom==0):
return +seq[..., -1]
seq[..., i + 2:] = (
(seq[..., i + 2:] * seq[..., i:-2] - seq[..., i + 1:-1]**2) /
denom)
return +seq[...,-1]
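# A hedged usage sketch (editor's addition, not part of the original module):
# it assumes numpy is imported as np at module level and the helper name
# _demo_shanks is hypothetical.
def _demo_shanks():
    """Illustrative only: accelerate the Leibniz series for pi.

    The raw partial sums converge very slowly; the iterated Shanks
    transformation of only a dozen terms is already close to pi.
    """
    partial_sums = [4.0 * sum((-1.0) ** k / (2 * k + 1) for k in range(m))
                    for m in range(1, 13)]
    # passing a list leaves partial_sums unmodified; an ndarray would be
    # modified in-place
    return shanks(partial_sums)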
def gk_quad(f, a, b, args=(), n=10, sum_intervals=False):
"""Integration by Gauss-Kronrod quadrature between intervals
Parameters
----------
f : function or method
Function to integrate.
a, b : 1d array
Limits of integration. Must have len(a)==len(b).
args : tuple, optional
`args` will be passed to f using f(x, *args). Default args=().
n : [7,10,15,20,25,30], optional
Number of gauss quadrature evaluation points. Default n=10. There will
be 2*n+1 Kronrod quadrature points.
sum_intervals : [False, True]
If sum_intervals=True the integral for each a and b will be summed.
Otherwise each interval integration will be returned. The sum of the
error estimates will also be summed.
Returns
-------
igral : ndarray
Integral of f between a and b.
If sum_intervals=False then shape of igral will be (len(a), ...)
where ... corresponds to however many dimensions are returned
from f with scalar arguments. Each value in igral corresponds to
the corresponding a-b interval. If sum_intervals=True then igral will
have shape (...).
err_estimate : ndarray same size as igral
Estimate of the error in the integral. i.e. absolute value of fine
integral minus coarse integral.
"""
ai = np.atleast_1d(a)
bi = np.atleast_1d(b)
xj_, wj1, wj2 = gauss_kronrod_abscissae_and_weights(n)
# dim1 = each integration limits, a and b
# dim2 = each quadrature point
ai = ai[:, np.newaxis]
bi = bi[:, np.newaxis]
xj_ = xj_[np.newaxis, :]
wj1 = wj1[np.newaxis, :]
wj2 = wj2[np.newaxis, :]
bma = (bi - ai) / 2 # b minus a
bpa = (ai + bi) /2 # b plus a
xij = bma * xj_ + bpa # xj_ are in [-1, 1] so need to transform to [a, b]
#get shape of output with scalar argument and form a slice that will ensure
#any extra dims are appended to the args.
extra = np.array(f(xij.flat[0], *args))
gen_slice = [slice(None)] * xij.ndim + [None] * extra.ndim
fij = f(xij[gen_slice], *args)
# igral1 = np.ravel(bma) * np.sum(fij * wj1, axis=1)
# igral2 = np.ravel(bma) * np.sum(fij * wj2, axis=1)
# igral1 = bma[:, 0] * np.sum(fij * wj1, axis=1)
# igral2 = bma[:, 0] * np.sum(fij * wj2, axis=1)
igral1 = np.sum(bma[gen_slice] *fij * wj1[gen_slice], axis=1)
igral2 = np.sum(bma[gen_slice] *fij * wj2[gen_slice], axis=1)
err_estimate = np.abs(igral2 - igral1)
if sum_intervals:
igral1 = np.sum(igral1, axis=0)
igral2 = np.sum(igral2, axis=0)
err_estimate = np.sum(err_estimate, axis=0)
return igral2, err_estimate
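# A hedged usage sketch (editor's addition, not part of the original module):
# it assumes numpy is imported as np at module level and the helper name
# _demo_gk_quad is hypothetical.
def _demo_gk_quad():
    """Illustrative only: integrate sin(x) over [0, pi/2] and [pi/2, pi]."""
    a = np.array([0.0, np.pi / 2])
    b = np.array([np.pi / 2, np.pi])
    # each sub-interval integrates to exactly 1, so igral should be ~2.0 and
    # err_estimate should be tiny
    igral, err_estimate = gk_quad(np.sin, a, b, n=10, sum_intervals=True)
    return igral, err_estimate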
def gl_quad(f, a, b, args=(), n=10, shanks_ind=False, sum_intervals=False):
"""Integration by Gauss-Legendre quadrature with subdivided interval
Parameters
----------
f : function or method
Function to integrate. Must accept vector arguments for x. Might
need to use numpy.vectorize.
a, b : 1d array
limits of integration
args : tuple, optional
args will be passed to f using f(x, *args). default=()
n : [2-20, 32, 64, 100], optional
number of quadrature evaluation points. default=10
sum_intervals : [False, True]
If sum_intervals=True the integral for each a and b will be summed.
Otherwise each interval integration will be returned.
Returns
-------
igral : ndarray
Integral of f between a and b.
If sum_intervals=False then shape of igral will be (len(a), ...)
where ... corresponds to however many dimensions are returned
from f with scalar arguments. Each value in igral corresponds to
the corresponding a-b interval. If sum_intervals=True then igral will
have shape (...).
Notes
-----
Be careful when using large values of n. There may be precision issues.
If f returns an ndarray when x is scalar, igral will have additional
dimensions corresponding to those of the f-with-scalar-x output.
"""
ai = np.atleast_1d(a)
bi = np.atleast_1d(b)
xj_, wj = gauss_legendre_abscissae_and_weights(n)
# dim1 = each integration limits, a and b
# dim2 = each quadrature point
ai = ai[:, np.newaxis]
bi = bi[:, np.newaxis]
xj_ = xj_[np.newaxis, :]
wj = wj[np.newaxis, :]
bma = (bi - ai) / 2 # b minus a
bpa = (ai + bi) /2 # b plus a
xij = bma * xj_ + bpa # xj_ are in [-1, 1] so need to transform to [a, b]
#get shape of output with scalar argument and form a slice that will ensure
#any extra dims are appended to the args.
extra = np.array(f(xij.flat[0], *args))
gen_slice = [slice(None)] * xij.ndim + [None] * extra.ndim
fij = f(xij[gen_slice], *args)
igral = np.sum(bma[gen_slice] * fij *wj[gen_slice], axis=1)
if sum_intervals:
igral = np.sum(igral, axis=0)
return igral
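# A hedged usage sketch (editor's addition, not part of the original module):
# it assumes numpy is imported as np at module level and the helper name
# _demo_gl_quad is hypothetical.
def _demo_gl_quad():
    """Illustrative only: integrate c*x**2 with an extra argument passed via args."""
    def f(x, c):
        return c * x ** 2
    a = np.array([0.0, 1.0])
    b = np.array([1.0, 2.0])
    # exact answer is the integral of 3*x**2 from 0 to 2, i.e. 8.0
    return gl_quad(f, a, b, args=(3.0,), n=10, sum_intervals=True)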
| gpl-3.0 |
r03ert0/ldsc | test/test_sumstats.py | 3 | 16976 | from __future__ import division
import ldscore.sumstats as s
import ldscore.parse as ps
import unittest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
from nose.tools import *
from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_allclose
from nose.plugins.attrib import attr
import os
from ldsc import parser
DIR = os.path.dirname(__file__)
N_REP = 200
s._N_CHR = 2 # having to mock 22 files is annoying
class Mock(object):
'''
Dumb object for mocking args and log
'''
def __init__(self):
pass
def log(self, x):
# pass
print x
log = Mock()
args = Mock()
t = lambda attr: lambda obj: getattr(obj, attr, float('nan'))
def test_check_condnum():
x = np.ones((2, 2))
x[1, 1] += 1e-5
args.invert_anyway = False
assert_raises(ValueError, s._check_ld_condnum, args, log, x)
args.invert_anyway = True
s._check_ld_condnum(args, log, x) # no error
def test_check_variance():
ld = pd.DataFrame({'SNP': ['a', 'b', 'c'],
'LD1': np.ones(3).astype(float),
'LD2': np.arange(3).astype(float)})
ld = ld[['SNP', 'LD1', 'LD2']]
M_annot = np.array([[1, 2]])
M_annot, ld, novar_col = s._check_variance(log, M_annot, ld)
assert_array_equal(M_annot.shape, (1, 1))
assert_array_equal(M_annot, [[2]])
assert_allclose(ld.iloc[:, 1], [0, 1, 2])
assert_array_equal(novar_col, [True, False])
def test_align_alleles():
beta = pd.Series(np.ones(6))
alleles = pd.Series(['ACAC', 'TGTG', 'GTGT', 'AGCT', 'AGTC', 'TCTC'])
beta = s._align_alleles(beta, alleles)
assert_series_equal(beta, pd.Series([1.0, 1, 1, -1, 1, 1]))
def test_filter_bad_alleles():
alleles = pd.Series(['ATAT', 'ATAG', 'DIID', 'ACAC'])
bad_alleles = s._filter_alleles(alleles)
print bad_alleles
assert_series_equal(bad_alleles, pd.Series([False, False, False, True]))
def test_read_annot():
ref_ld_chr = None
ref_ld = os.path.join(DIR, 'annot_test/test')
overlap_matrix, M_tot = s._read_chr_split_files(ref_ld_chr, ref_ld, log, 'annot matrix',
ps.annot, frqfile=None)
assert_array_equal(overlap_matrix, [[1, 0, 0], [0, 2, 2], [0, 2, 2]])
assert_array_equal(M_tot, 3)
frqfile = os.path.join(DIR, 'annot_test/test1')
overlap_matrix, M_tot = s._read_chr_split_files(ref_ld_chr, ref_ld, log, 'annot matrix',
ps.annot, frqfile=frqfile)
assert_array_equal(overlap_matrix, [[1, 0, 0], [0, 1, 1], [0, 1, 1]])
assert_array_equal(M_tot, 2)
def test_valid_snps():
x = {'AC', 'AG', 'CA', 'CT', 'GA', 'GT', 'TC', 'TG'}
assert_equal(x, s.VALID_SNPS)
def test_bases():
x = set(['A', 'T', 'G', 'C'])
assert_equal(x, set(s.BASES))
def test_complement():
assert_equal(s.COMPLEMENT, {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'})
def test_warn_len():
# nothing to test except that it doesn't throw an error at runtime
s._warn_length(log, [1])
def test_match_alleles():
m = {'ACAC',
'ACCA',
'ACGT',
'ACTG',
'AGAG',
'AGCT',
'AGGA',
'AGTC',
'CAAC',
'CACA',
'CAGT',
'CATG',
'CTAG',
'CTCT',
'CTGA',
'CTTC',
'GAAG',
'GACT',
'GAGA',
'GATC',
'GTAC',
'GTCA',
'GTGT',
'GTTG',
'TCAG',
'TCCT',
'TCGA',
'TCTC',
'TGAC',
'TGCA',
'TGGT',
'TGTG'}
assert_equal(m, s.MATCH_ALLELES)
def test_flip_alleles():
m = {'ACAC': False,
'ACCA': True,
'ACGT': True,
'ACTG': False,
'AGAG': False,
'AGCT': True,
'AGGA': True,
'AGTC': False,
'CAAC': True,
'CACA': False,
'CAGT': False,
'CATG': True,
'CTAG': True,
'CTCT': False,
'CTGA': False,
'CTTC': True,
'GAAG': True,
'GACT': False,
'GAGA': False,
'GATC': True,
'GTAC': True,
'GTCA': False,
'GTGT': False,
'GTTG': True,
'TCAG': False,
'TCCT': True,
'TCGA': True,
'TCTC': False,
'TGAC': False,
'TGCA': True,
'TGGT': True,
'TGTG': False}
assert_equal(m, s.FLIP_ALLELES)
def test_strand_ambiguous():
m = {'AC': False,
'AG': False,
'AT': True,
'CA': False,
'CG': True,
'CT': False,
'GA': False,
'GC': True,
'GT': False,
'TA': True,
'TC': False,
'TG': False}
assert_equal(m, s.STRAND_AMBIGUOUS)
@attr('rg')
@attr('slow')
class Test_RG_Statistical():
@classmethod
def setUpClass(cls):
args = parser.parse_args('')
args.ref_ld = DIR + '/simulate_test/ldscore/twold_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.rg = ','.join(
(DIR + '/simulate_test/sumstats/' + str(i) for i in xrange(N_REP)))
args.out = DIR + '/simulate_test/1'
x = s.estimate_rg(args, log)
args.intercept_gencov = ','.join(('0' for _ in xrange(N_REP)))
args.intercept_h2 = ','.join(('1' for _ in xrange(N_REP)))
y = s.estimate_rg(args, log)
cls.rg = x
cls.rg_noint = y
def test_rg_ratio(self):
assert_allclose(np.nanmean(map(t('rg_ratio'), self.rg)), 0, atol=0.02)
def test_rg_ratio_noint(self):
assert_allclose(
np.nanmean(map(t('rg_ratio'), self.rg_noint)), 0, atol=0.02)
def test_rg_se(self):
assert_allclose(np.nanmean(map(t('rg_se'), self.rg)), np.nanstd(
map(t('rg_ratio'), self.rg)), atol=0.02)
def test_rg_se_noint(self):
assert_allclose(np.nanmean(map(t('rg_se'), self.rg_noint)), np.nanstd(
map(t('rg_ratio'), self.rg_noint)), atol=0.02)
def test_gencov_tot(self):
assert_allclose(
np.nanmean(map(t('tot'), map(t('gencov'), self.rg))), 0, atol=0.02)
def test_gencov_tot_noint(self):
assert_allclose(
np.nanmean(map(t('tot'), map(t('gencov'), self.rg_noint))), 0, atol=0.02)
def test_gencov_tot_se(self):
assert_allclose(np.nanstd(map(t('tot'), map(t('gencov'), self.rg))), np.nanmean(
map(t('tot_se'), map(t('gencov'), self.rg))), atol=0.02)
def test_gencov_tot_se_noint(self):
assert_allclose(np.nanstd(map(t('tot'), map(t('gencov'), self.rg_noint))), np.nanmean(
map(t('tot_se'), map(t('gencov'), self.rg_noint))), atol=0.02)
def test_gencov_cat(self):
assert_allclose(
np.nanmean(map(t('cat'), map(t('gencov'), self.rg))), [0, 0], atol=0.02)
def test_gencov_cat_noint(self):
assert_allclose(
np.nanmean(map(t('cat'), map(t('gencov'), self.rg_noint))), [0, 0], atol=0.02)
def test_gencov_cat_se(self):
assert_allclose(np.nanstd(map(t('cat'), map(t('gencov'), self.rg))), np.nanmean(
map(t('cat_se'), map(t('gencov'), self.rg))), atol=0.02)
def test_gencov_cat_se_noint(self):
assert_allclose(np.nanstd(map(t('cat'), map(t('gencov'), self.rg_noint))), np.nanmean(
map(t('cat_se'), map(t('gencov'), self.rg_noint))), atol=0.02)
def test_gencov_int(self):
assert_allclose(
np.nanmean(map(t('intercept'), map(t('gencov'), self.rg))), 0, atol=0.1)
def test_gencov_int_se(self):
assert_allclose(np.nanmean(map(t('intercept_se'), map(t('gencov'), self.rg))), np.nanstd(
map(t('intercept'), map(t('gencov'), self.rg))), atol=0.1)
def test_hsq_int(self):
assert_allclose(
np.nanmean(map(t('intercept'), map(t('hsq2'), self.rg))), 1, atol=0.1)
def test_hsq_int_se(self):
assert_allclose(np.nanmean(map(t('intercept_se'), map(t('hsq2'), self.rg))), np.nanstd(
map(t('intercept'), map(t('hsq2'), self.rg))), atol=0.1)
@attr('h2')
@attr('slow')
class Test_H2_Statistical(unittest.TestCase):
@classmethod
def setUpClass(cls):
args = parser.parse_args('')
args.ref_ld = DIR + '/simulate_test/ldscore/twold_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.chisq_max = 99999
h2 = []
h2_noint = []
for i in xrange(N_REP):
args.intercept_h2 = None
args.h2 = DIR + '/simulate_test/sumstats/' + str(i)
args.out = DIR + '/simulate_test/1'
h2.append(s.estimate_h2(args, log))
args.intercept_h2 = 1
h2_noint.append(s.estimate_h2(args, log))
cls.h2 = h2
cls.h2_noint = h2_noint
def test_tot(self):
assert_allclose(np.nanmean(map(t('tot'), self.h2)), 0.9, atol=0.05)
def test_tot_noint(self):
assert_allclose(
np.nanmean(map(t('tot'), self.h2_noint)), 0.9, atol=0.05)
def test_tot_se(self):
assert_allclose(np.nanmean(map(t('tot_se'), self.h2)), np.nanstd(
map(t('tot'), self.h2)), atol=0.05)
def test_tot_se_noint(self):
assert_allclose(np.nanmean(map(t('tot_se'), self.h2_noint)), np.nanstd(
map(t('tot'), self.h2_noint)), atol=0.05)
def test_cat(self):
x = np.nanmean(map(t('cat'), self.h2_noint), axis=0)
y = np.array((0.3, 0.6)).reshape(x.shape)
assert_allclose(x, y, atol=0.05)
def test_cat_noint(self):
x = np.nanmean(map(t('cat'), self.h2_noint), axis=0)
y = np.array((0.3, 0.6)).reshape(x.shape)
assert_allclose(x, y, atol=0.05)
def test_cat_se(self):
x = np.nanmean(map(t('cat_se'), self.h2), axis=0)
y = np.nanstd(map(t('cat'), self.h2), axis=0).reshape(x.shape)
assert_allclose(x, y, atol=0.05)
def test_cat_se_noint(self):
x = np.nanmean(map(t('cat_se'), self.h2_noint), axis=0)
y = np.nanstd(map(t('cat'), self.h2_noint), axis=0).reshape(x.shape)
assert_allclose(x, y, atol=0.05)
def test_coef(self):
# should be h^2/M = [[0.3, 0.9]] / M
coef = np.array(((0.3, 0.9))) / self.h2[0].M
for h in [self.h2, self.h2_noint]:
assert np.all(np.abs(np.nanmean(map(t('coef'), h), axis=0) - coef) < 1e6)
def test_coef_se(self):
for h in [self.h2, self.h2_noint]:
assert_array_almost_equal(np.nanmean(map(t('coef_se'), h), axis=0),
np.nanstd(map(t('coef'), h), axis=0))
def test_prop(self):
for h in [self.h2, self.h2_noint]:
assert np.all(np.nanmean(map(t('prop'), h), axis=0) - [1/3, 2/3] < 0.02)
def test_prop_se(self):
for h in [self.h2, self.h2_noint]:
assert np.all(np.nanmean(map(t('prop_se'), h), axis=0) - np.nanstd(map(t('prop'), h), axis=0) < 0.02)
def test_int(self):
assert_allclose(np.nanmean(map(t('intercept'), self.h2)), 1, atol=0.1)
def test_int_se(self):
assert_allclose(np.nanstd(map(t('intercept'), self.h2)), np.nanmean(
map(t('intercept_se'), self.h2)), atol=0.1)
class Test_Estimate(unittest.TestCase):
def test_h2_M(self): # check --M works
args = parser.parse_args('')
args.ref_ld = DIR + '/simulate_test/ldscore/oneld_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.h2 = DIR + '/simulate_test/sumstats/1'
args.out = DIR + '/simulate_test/1'
args.print_cov = True # right now just check no runtime errors
args.print_delete_vals = True
x = s.estimate_h2(args, log)
args.M = str(
float(open(DIR + '/simulate_test/ldscore/oneld_onefile.l2.M_5_50').read()))
y = s.estimate_h2(args, log)
assert_array_almost_equal(x.tot, y.tot)
assert_array_almost_equal(x.tot_se, y.tot_se)
args.M = '1,2'
assert_raises(ValueError, s.estimate_h2, args, log)
args.M = 'foo_bar'
assert_raises(ValueError, s.estimate_h2, args, log)
def test_h2_ref_ld(self): # test different ways of reading ref ld
args = parser.parse_args('')
args.ref_ld_chr = DIR + '/simulate_test/ldscore/twold_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.h2 = DIR + '/simulate_test/sumstats/555'
args.out = DIR + '/simulate_test/'
x = s.estimate_h2(args, log)
args.ref_ld = DIR + '/simulate_test/ldscore/twold_firstfile,' + \
DIR + '/simulate_test/ldscore/twold_secondfile'
y = s.estimate_h2(args, log)
args.ref_ld_chr = DIR + '/simulate_test/ldscore/twold_firstfile,' + \
DIR + '/simulate_test/ldscore/twold_secondfile'
z = s.estimate_h2(args, log)
assert_almost_equal(x.tot, y.tot)
assert_array_almost_equal(y.cat, z.cat)
assert_array_almost_equal(x.prop, y.prop)
assert_array_almost_equal(y.coef, z.coef)
assert_array_almost_equal(x.tot_se, y.tot_se)
assert_array_almost_equal(y.cat_se, z.cat_se)
assert_array_almost_equal(x.prop_se, y.prop_se)
assert_array_almost_equal(y.coef_se, z.coef_se)
# test statistical properties (constrain intercept here)
def test_rg_M(self):
args = parser.parse_args('')
args.ref_ld = DIR + '/simulate_test/ldscore/oneld_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.rg = ','.join(
[DIR + '/simulate_test/sumstats/1' for _ in xrange(2)])
args.out = DIR + '/simulate_test/1'
x = s.estimate_rg(args, log)[0]
args.M = open(
DIR + '/simulate_test/ldscore/oneld_onefile.l2.M_5_50', 'rb').read().rstrip('\n')
y = s.estimate_rg(args, log)[0]
assert_array_almost_equal(x.rg_ratio, y.rg_ratio)
assert_array_almost_equal(x.rg_se, y.rg_se)
args.M = '1,2'
assert_raises(ValueError, s.estimate_rg, args, log)
args.M = 'foo_bar'
assert_raises(ValueError, s.estimate_rg, args, log)
def test_rg_ref_ld(self):
args = parser.parse_args('')
args.ref_ld_chr = DIR + '/simulate_test/ldscore/twold_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.rg = ','.join(
[DIR + '/simulate_test/sumstats/1' for _ in xrange(2)])
args.out = DIR + '/simulate_test/1'
args.print_cov = True # right now just check no runtime errors
args.print_delete_vals = True
x = s.estimate_rg(args, log)[0]
args.ref_ld = DIR + '/simulate_test/ldscore/twold_firstfile,' + \
DIR + '/simulate_test/ldscore/twold_secondfile'
y = s.estimate_rg(args, log)[0]
args.ref_ld_chr = DIR + '/simulate_test/ldscore/twold_firstfile,' + \
DIR + '/simulate_test/ldscore/twold_secondfile'
z = s.estimate_rg(args, log)[0]
assert_almost_equal(x.rg_ratio, y.rg_ratio)
assert_almost_equal(y.rg_jknife, z.rg_jknife)
assert_almost_equal(x.rg_se, y.rg_se)
def test_no_check_alleles(self):
args = parser.parse_args('')
args.ref_ld = DIR + '/simulate_test/ldscore/oneld_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.rg = ','.join(
[DIR + '/simulate_test/sumstats/1' for _ in xrange(2)])
args.out = DIR + '/simulate_test/1'
x = s.estimate_rg(args, log)[0]
args.no_check_alleles = True
y = s.estimate_rg(args, log)[0]
assert_equal(x.rg_ratio, y.rg_ratio)
assert_almost_equal(x.rg_jknife, y.rg_jknife)
assert_equal(x.rg_se, y.rg_se)
def test_twostep_h2(self):
# make sure two step isn't going crazy
args = parser.parse_args('')
args.ref_ld = DIR + '/simulate_test/ldscore/oneld_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.h2 = DIR + '/simulate_test/sumstats/1'
args.out = DIR + '/simulate_test/1'
args.chisq_max = 9999999
args.two_step = 999
x = s.estimate_h2(args, log)
args.chisq_max = 9999
args.two_step = 99999
y = s.estimate_h2(args, log)
assert_allclose(x.tot, y.tot, atol=1e-5)
def test_twostep_rg(self):
# make sure two step isn't going crazy
args = parser.parse_args('')
args.ref_ld_chr = DIR + '/simulate_test/ldscore/oneld_onefile'
args.w_ld = DIR + '/simulate_test/ldscore/w'
args.rg = ','.join(
[DIR + '/simulate_test/sumstats/1' for _ in xrange(2)])
args.out = DIR + '/simulate_test/rg'
args.two_step = 999
x = s.estimate_rg(args, log)[0]
args.two_step = 99999
y = s.estimate_rg(args, log)[0]
assert_allclose(x.rg_ratio, y.rg_ratio, atol=1e-5)
assert_allclose(x.gencov.tot, y.gencov.tot, atol=1e-5)
| gpl-3.0 |
AIML/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
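# Scale the noise so that ||y||_2 / ||added noise||_2 = exp(snr / 20.);
# larger `snr` therefore means proportionally less noise mixed into the target.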
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
MaxStrange/ArtieInfant | scripts/plotaudio/plotaudio.py | 1 | 2598 | """
This is code that I find I use a LOT while debugging or analyzing.
"""
import audiosegment
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
#################################################
#### These are the parameters I have been using #
#################################################
# ---- for long spectrograms ------
sample_rate_hz = 16000.0 # 16kHz sample rate
bytewidth = 2 # 16-bit samples
nchannels = 1 # mono
duration_s = 0.5 # Duration of each complete spectrogram
window_length_s = 0.03 # How long each FFT is
overlap = 0.2 # How much each FFT overlaps with each other one
# ---- for short spectrograms ------
#sample_rate_hz = 8000.0 # 8kHz sample rate
#bytewidth = 2 # 16-bit samples
#nchannels = 1 # mono
#duration_s = 0.3 # Duration of each complete spectrogram
#window_length_s = 0.02 # How long each FFT is
#overlap = 0.2 # How much each FFT overlaps with each other one
#################################################
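# Rough sanity check of the long-spectrogram settings above: each FFT window
# spans window_length_s * sample_rate_hz = 0.03 * 16000 = 480 samples, and if
# `overlap` is interpreted as a fraction of the window, consecutive windows
# share about 0.2 * 480 = 96 samples.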
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Need a path to a WAV file.")
exit(1)
seg = audiosegment.from_file(sys.argv[1])
print(seg)
print(" -> RMS:", seg.rms)
print(" -> SPL:", seg.spl)
print(" -> Length (s):", seg.duration_seconds)
print(" -> NChannels:", seg.channels)
print(" -> Frequency (Hz):", seg.frame_rate)
print(" -> Bytes per sample:", seg.sample_width)
print(" -> Human audible?", seg.human_audible())
name = os.path.basename(sys.argv[1])
name, _ext = os.path.splitext(name)
plt.title("Raw Values")
arr = seg.to_numpy_array()
times = np.linspace(0, len(arr) / seg.frame_rate, num=len(arr))
plt.plot(times, seg.to_numpy_array())
plt.xlabel("Time (s)")
plt.ylabel("PCM")
plt.savefig("{}-waveform.png".format(name))
plt.show()
plt.title("Histogram")
hist_bins, hist_vals = seg.fft()
hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals)
plt.plot(hist_bins/1000, hist_vals_real_normed)
plt.xlabel("kHz")
plt.ylabel("dB")
plt.savefig("{}-histogram.png".format(name))
plt.show()
plt.title("Spectrogram")
fs, ts, amps = seg.spectrogram(0, duration_s, window_length_s=window_length_s, overlap=overlap, window=('tukey', 0.5))
#amps = 10.0 * np.log10(amps)
plt.pcolormesh(ts, fs, amps)
plt.xlabel("Time (s)")
plt.ylabel("Hz")
plt.savefig("{}-spectrogram.png".format(name))
plt.show()
| mit |
janeloveless/mechanics-of-exploration | neuromech/util.py | 1 | 11756 | #! /usr/bin/env python
import os
import itertools as it
import sys
import textwrap
#import gtk
import numpy as np
import sympy as sy
import sympy.stats
import odespy as ode
import matplotlib
import matplotlib.pyplot as plt
import sympy.physics.mechanics as mech
"""
Pretty plotting code.
"""
_all_spines = ["top", "right", "bottom", "left"]
def hide_spines(s=["top", "right"]):
"""Hides the top and rightmost axis spines from view for all active
figures and their respective axes."""
global _all_spines
# Retrieve a list of all current figures.
figures = [x for x in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for figure in figures:
# Get all Axis instances related to the figure.
for ax in figure.canvas.figure.get_axes():
for spine in _all_spines :
if spine in s :
ax.spines[spine].set_color('none')
if "top" in s and "bottom" in s :
ax.xaxis.set_ticks_position('none')
elif "top" in s :
ax.xaxis.set_ticks_position('bottom')
elif "bottom" in s :
ax.xaxis.set_ticks_position('top')
else :
ax.xaxis.set_ticks_position('both')
if "left" in s and "right" in s :
ax.yaxis.set_ticks_position('none')
elif "left" in s :
ax.yaxis.set_ticks_position('right')
elif "right" in s :
ax.yaxis.set_ticks_position('left')
else :
ax.yaxis.set_ticks_position('both')
"""
FORTRAN compilation code.
"""
def find_matching_parentheses(s, popen="(", pclose=")") :
i_start = s.find(popen)
i_end = -1
count = 0
s_frame = s[i_start:]
for i in xrange(len(s_frame)) :
char = s_frame[i]
if char == popen :
count += 1
elif char == pclose :
count -= 1
if count == 0 :
i_end = i + i_start + 1
break
return i_start, i_end
def parse_merge(H, s) :
"""
Parse the first FORTRAN merge statement found within s.
H is the name of a hidden variable which will be used to store the value of
the piecewise function defined by the merge statement.
"""
# extract bracketed code in merge statement from s
# m_statement is of form "(expr1,expr2,cond)"
i_merge_start = s.find("merge")
ms = s[i_merge_start:]
i_start, i_end = find_matching_parentheses(ms)
m_statement = ms[i_start:i_end]
# print m_statement
# extract expr1, expr2, and conditional
i1 = m_statement.find(",")
i2 = m_statement.rfind(",")
expr1 = m_statement[1:i1]
expr2 = m_statement[i1 + 1:i2]
cond = m_statement[i2 + 1:-1]
# if expr1, expr2, or cond are merge statements, recursively call this
# function otherwise, set the hidden switch variable to take the value of
# the relevant expr
if expr1.find("merge") != -1 :
expr1_str = parse_merge(H, expr1)[-1]
expr1_str = "".join([" " + s + "\n" for s in expr1_str.splitlines()])
else :
expr1_str = " " + H + "=" + expr1
if expr2.find("merge") != -1 :
expr2_str = parse_merge(H, expr2)[-1]
expr2_str = "".join([" " + s + "\n" for s in expr2_str.splitlines()])
else :
expr2_str = " " + H + "=" + expr2
# format expr1_str, expr2_str, and cond into a correct FORTRAN IF-THEN-ELSE
# statement
f_code = " IF (" + cond.strip() + ") THEN \n" + expr1_str + "\n" + \
" ELSE \n" + expr2_str + "\n" + \
" ENDIF \n"
return i_merge_start, i_merge_start + i_end, f_code
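# Illustrative example (not executed): for s = "y=merge(1.0d0,0.0d0,x>0)" and
# H = "H(1)", parse_merge locates the merge(...) call and returns its span in s
# together with FORTRAN roughly of the form
#       IF (x>0) THEN
#         H(1)=1.0d0
#       ELSE
#         H(1)=0.0d0
#       ENDIF
# so the caller can substitute H(1) for the inline merge expression.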
def FORTRAN_f(x, f, parameters=[], verbose=False) :
"""
Produce FORTRAN function for evaluating a vector-valued SymPy expression f
given a state vector x.
The FORTRAN function will have the signature f_f77(neq, t, X, Y) where neq
is hidden and Y is an output matrix.
"""
# TODO remove code for dealing with stochastic systems -- it is not used in
# this paper
x = list(x) + list(parameters)
f = list(f) + [0]*len(parameters)
rv = list(set((np.concatenate([sy.stats.random_symbols(f_i) for f_i in f]))))
NR = len(rv)
if NR > 0 :
x += [sy.symbols("dt"), sy.symbols("seed")]
f += [0, 0]
NX = len(x)
NY = len(f)
if NX != NY :
raise Exception("System is not square!")
if verbose : print "generating FORTRAN matrices..."
_X = sy.tensor.IndexedBase("X", shape=(NX, ))
X = [_X[i + 1] for i in xrange(NX)]
_R = sy.tensor.IndexedBase("R", shape=(NR, ))
R = [_R[i + 1] for i in xrange(NR)]
if type(f) != sy.Matrix : f = sy.Matrix(f)
# WARNING : These substitution steps are VERY SLOW!!! It might be wise to
# parallelise them in the future, or at least substitute into one dynamical
# equation at a time so that progress can be monitored.
if verbose : print "substituting matrix elements for original state variables and parameters (WARNING: SLOW)..."
f_sub = f.subs(zip(x, X))
if verbose : print "substituting matrix elements for random variables (WARNING: SLOW)..."
f_sub = f_sub.subs(zip(rv, R))
# generate FORTRAN code
if verbose : print "generating FORTRAN code from dynamics equations..."
fstrs = [sy.fcode(fi, standard=95) for fi in f_sub]
# remove whitespace and newlines
if verbose : print "removing whitespace and newlines..."
fstrs = ["".join(fi.split()) for fi in fstrs]
# remove all @ (FORTRAN line continuation indicator)
if verbose : print "removing line continuations..."
fstrs = [fi.replace("@", "") for fi in fstrs]
# find FORTRAN inline merge statements and replace with a hidden "switch"
# variable whose value is set by a full IF statement at the start of the
# function call.
# -- this is needed because FORTRAN77 doesn't support inline merge statements
Hstrs = [] # to hold hidden switch expressions
if verbose : print "formatting piecewise functions..."
for i in xrange(len(fstrs)) :
while fstrs[i].find("merge") != -1 :
H = "H(" + str(len(Hstrs) + 1) + ")"
i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
Hstrs.append(Hstr)
NH = len(Hstrs)
# format the fstrs
wrapper = textwrap.TextWrapper(expand_tabs=True,
replace_whitespace=True,
initial_indent=" ",
subsequent_indent=" @ ",
width=60)
if verbose : print "formatting state equations..."
for i in xrange(len(fstrs)) :
fstrs[i] = wrapper.fill("Y(" + str(i + 1) + ")=" + fstrs[i]) + "\n"
# put the above elements together into a FORTRAN subroutine
if verbose : print "formatting preamble..."
hdr = " subroutine f_f77(neq, t, X, Y) \n" +\
"Cf2py intent(hide) neq \n" +\
"Cf2py intent(out) Y \n" +\
" integer neq \n" +\
" double precision t, X, Y \n" +\
" dimension X(neq), Y(neq) \n"
if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
# TODO fix the following -- assumes dt = 0.01
# NOTE this is only important when dealing with stochastic systems
if NR > 0 : hdr += " real, dimension(" + str(NR) + ") :: R \n" +\
" integer :: SEED \n" +\
" real :: RTRASH \n" +\
" SEED = INT((t/" + sy.fcode(X[-2]).strip() +\
") + " + sy.fcode(X[-1]).strip() + ") \n" +\
" CALL SRAND(SEED) \n" +\
" DO i=1,4 \n" +\
" RTRASH=RAND(0) \n" +\
" END DO \n"
R_block = "".join([sy.fcode(R_i) + "=RAND(0) \n" for R_i in R])
H_block = "".join(Hstrs)
Y_block = "".join(fstrs)
if verbose : print "assembling source code blocks..."
fcode = hdr + R_block + H_block + Y_block + " return \n" + " end \n"
# final formatting
if verbose : print "final source code formatting..."
wrapper = textwrap.TextWrapper(expand_tabs=True, replace_whitespace=True,
initial_indent="", subsequent_indent=" @ ", width=60)
fcode = "".join([wrapper.fill(src) + "\n" for src in fcode.split("\n")])
return fcode
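# Minimal usage sketch (the symbols below are illustrative only, not taken from
# any model in this paper):
#
#   x, v = sy.symbols("x v")
#   k, c = sy.symbols("k c")
#   src = FORTRAN_f([x, v], [v, -k*x - c*v], parameters=[k, c])
#   f_f77 = FORTRAN_compile(src)   # see below
#
# Parameters are appended to the state vector with zero time-derivative, so the
# initial condition handed to the solver must append the parameter values
# (FORTRAN_integrate below does this via its p0 argument).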
def FORTRAN_jacobian(x, jac, parameters=[]) :
# TODO document
# TODO remove this function if unused in paper
NX = len(x)
NP = len(parameters)
Nrowpd = jac.shape[0]
Ncolpd = jac.shape[1]
if NX != Nrowpd != Ncolpd :
raise Exception("System is not square!")
_X = sy.tensor.IndexedBase("X", shape=(NX, ))
X = [_X[i + 1] for i in xrange(NX)]
X = X + [_X[NX + i + 1] for i in xrange(NP)]
if type(jac) == sy.Matrix : jac = sy.Matrix(jac)
jac_sub = jac.subs(zip(list(x) + list(parameters), X))
ijs = [i for i in it.product(xrange(Nrowpd), xrange(Ncolpd))]
# generate FORTRAN code
fstrs = [sy.fcode(jac_ij) for jac_ij in jac_sub]
# remove whitespace and newlines
fstrs = ["".join(jac_ij.split()) for jac_ij in fstrs]
# remove all @ (FORTRAN line continuation indicator)
fstrs = [jac_ij.replace("@", "") for jac_ij in fstrs]
# find FORTRAN inline merge statements and replace with a hidden "switch"
# variable whose value is set by a full IF statement at the start of the
# function call.
# -- this is needed because FORTRAN77 doesn't support inline merge statements
Hstrs = [] # to hold hidden switch expressions
for i in xrange(len(fstrs)) :
while fstrs[i].find("merge") != -1 :
H = "H(" + str(len(Hstrs) + 1) + ")"
i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
Hstrs.append(Hstr)
NH = len(Hstrs)
# format the fstrs
wrapper = textwrap.TextWrapper(expand_tabs=True,
replace_whitespace=True,
initial_indent=" ",
subsequent_indent=" @ ",
width=60)
for k in xrange(len(fstrs)) :
i, j = ijs[k]
fstrs[k] = wrapper.fill("pd(" + str(i + 1) + "," + str(j + 1) + ")=" + fstrs[k]) + "\n"
# put the above elements together into a FORTRAN subroutine
hdr = " subroutine jac_f77(neq, t, X, ml, mu, pd, nrowpd) \n" +\
"Cf2py intent(hide) neq, ml, mu, nrowpd \n" +\
"Cf2py intent(out) pd \n" +\
" integer neq, ml, mu, nrowpd \n" +\
" double precision t, X, pd \n" +\
" dimension X(neq), pd(neq, neq) \n"
if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
H_block = "".join(Hstrs)
pd_block = "".join(fstrs)
fcode = hdr + H_block + pd_block + " return \n" + " end \n"
return fcode
def FORTRAN_compile(fcode) :
f_f77 = ode.compile_f77(fcode)
os.remove("tmp_callback.so")
# reload(ode)
return f_f77
"""
Numerical integration code.
"""
def FORTRAN_integrate(t, x0, f, p0=[], jac=None, rtol=0.0001, atol=0.0001) :
solver = ode.Lsodes(f=None, f_f77=f, jac_f77=jac, rtol=rtol, atol=atol)
solver.set_initial_condition(list(x0) + list(p0))
x, _ = solver.solve(t)
return x
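# Usage sketch (continuing the illustrative example above):
#
#   t = np.linspace(0, 10, 1000)
#   sol = FORTRAN_integrate(t, x0=[1.0, 0.0], f=f_f77, p0=[1.0, 0.1])
#
# sol has one row per time point; the trailing columns hold the (constant)
# parameter values, so the state trajectory proper is sol[:, :len(x0)].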
| unlicense |
abelfunctions/abelfunctions | examples/riemanntheta_demo.py | 2 | 8564 | """
Grady Williams
January 28, 2013
This module provides functions for displaying graphs of the Riemann-Theta
function. There are 12 different graphs that can be generated; 10 of them
correspond to the graphics shown on the Digital Library of Mathematical
Functions page for Riemann Theta (dlmf.nist.gov/21.4), and the names of the
functions that generate those plots correspond to the names of the plots on
that page (e.g. plt_a1 generates the plot denoted a1 on the DLMF page).
The other two graphs are of the first and second derivatives for a given Omega.
Besides the derivative plots, all of the plots accept a few optional arguments:
SIZE: the number of grid points per direction over which the function is computed;
     the default is 75.
warp: the mayavi warp number; documentation for it can be found at
      docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html. The default is 'auto'.
d_axes: a boolean value which determines whether or not the axes are displayed.
        WARNING: If d_axes is set to True, then warp should be set to '1'. Otherwise incorrect
        axes will be displayed and function values will appear incorrect.
There are 3 different Omegas that are considered
Omega 1 = [[1.690983006 + .951056516*1.0j 1.5 + .363271264*1.0j]
[1.5 + .363271264*1.0j 1.309016994 + .951056516*1.0j]]
Omega 2 = [[1.0j -.5]
[-.5 1.0j]]
Omega 3 = [[-.5 + 1.0j .5 -.5*1.0j -.5-.5*1.0j]
[.5 -.5*1.0j 1.0j 0 ]
[-.5 - .5*1.0j 0 1.0j ]]
In all of the following graphs, the exponential growth of Riemann Theta has been factored out.
"""
from abelfunctions import RiemannTheta
import numpy as np
from mayavi.mlab import *
import matplotlib.pyplot as plt
gpu = True
try:
import pycuda.driver
except ImportError:
gpu = False
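# Example usage (sketch): each plt_* surface plot draws into a mayavi scene
# (mlab.surf), while the derivative plots use matplotlib. A working mayavi
# installation is assumed; the `gpu` flag above only records whether pycuda
# imported successfully.
#
#   plt_a1()                               # real part, default 75x75 grid
#   plt_c1(SIZE=150, warp=1, d_axes=True)  # modulus with true axes displayed
#   plt_first_deriv()                      # first-derivative line plot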
"""
Plots the real part of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.a1 on DLMF
"""
def plt_a1(SIZE=75, warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
"""
Plots the imaginary part of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.b1 on DLMF
"""
def plt_b1(SIZE=75,warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE,gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
"""
Plots the modulus of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.c1 on DLMF
"""
def plt_c1(SIZE=75, warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE, gpu)
V = np.absolute(V)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_a2(SIZE=75,warp = "auto",d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_b2(SIZE=75,warp= "auto", d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_c2(SIZE=75, warp = "auto", d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = np.absolute(V)
s = surf(X,Y,V,warp_scale = warp)
if d_axes:
axes()
return s
def plt_a3(SIZE=75, warp = "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_b3(SIZE=75, warp= "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE,gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_c3(SIZE=75, warp= "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE,gpu)
V = np.absolute(V)
s = surf(X,Y,V,warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_2(SIZE=75, warp = "auto", d_axes = False):
X,Y,V = get_d_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_3(SIZE=75, warp = "auto", d_axes=False):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5,1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:2:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = np.absolute(V)
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_4(SIZE=75,warp = "auto", d_axes=False, gpu=False):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5,1.0j]])
X,Y = np.mgrid[0:4:SIZE*1.0j, 0:4:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z.real*1.0j,z.imag*1.0j] for z in Z], Omega, batch=True)
V = V.real
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_5(SIZE=75,warp = "auto", d_axes=False, gpu=False):
theta = RiemannTheta
Omega = np.matrix([[-.5 + 1.0j, .5 -.5*1.0j, -.5-.5*1.0j],
[.5 -.5*1.0j, 1.0j, 0],
[-.5 - .5*1.0j, 0, 1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:3:1.0j*SIZE]
Z = X+Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0,0] for z in Z], Omega, batch=True)
V = V.real
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V,warp_scale=warp)
if d_axes:
axes()
return s
def plt_first_deriv():
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
k = [[1,0]]
Z = np.linspace(0,50,500)
U,V = theta.exp_and_osc_at_point([[0, z*1.0j] for z in Z], Omega, deriv=k, batch=True)
plt.plot(Z, V.real)
plt.show()
def plt_second_deriv():
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
k = [[1,0],[1,0]]
Z = np.linspace(0,50,500)
U,V = theta.exp_and_osc_at_point([[0, z*1.0j] for z in Z], Omega, deriv=k, batch=True)
plt.plot(Z, V.real)
plt.show()
def explosion(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[-1.5:1.5:SIZE*1.0j, -1.5:1.5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = np.exp(U)*V
V = V.reshape(SIZE, SIZE)
s = surf(X,Y,np.absolute(V), warp_scale = 'auto')
savefig("test.eps")
def get_r1_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = V.reshape(SIZE, SIZE)
return X,Y,V
def get_r2_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X = np.linspace(0,1,SIZE)
Y = np.linspace(0,1,SIZE)
Z = []
for x in X:
for y in Y:
Z.append([x,y])
U,V = theta.exp_and_osc_at_point(Z, Omega, batch=True)
V = V.reshape(SIZE,SIZE)
return X,Y,V
def get_r3_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[0:5:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[1.0j*z.real,1.0j*z.imag] for z in Z], Omega, batch=True)
V = V.reshape(SIZE, SIZE)
return X,Y,V
def get_d_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5, 1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y * 1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = V.reshape(SIZE,SIZE)
return X,Y,V
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
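# The Hilbert matrix is famously ill-conditioned, so the columns of X are
# nearly collinear; this is what makes the coefficients oscillate wildly as
# alpha approaches zero in the plot below.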
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
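# BernoulliRBM models visible units in the [0, 1] interval, so the digits data
# is rescaled to that range before the tests run.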
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
mlyundin/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
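# X now holds the projection of the four iris measurements onto the first
# three principal components.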
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/frame/test_nonunique_indexes.py | 2 | 18038 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameNonuniqueIndexes(TestData):
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
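            # .dtypes and str() below are smoke tests: neither should raise
            # even when the frame has duplicate column labels.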
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = list(range(2))
df = DataFrame(arr, columns=["A", "A"])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range("20130101", periods=4, freq="Q-NOV")
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]
)
df.columns = idx
expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=["foo", "bar", "foo", "hello"],
)
df["string"] = "bah"
expected = DataFrame(
[[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],
columns=["foo", "bar", "foo", "hello", "string"],
)
check(df, expected)
with pytest.raises(ValueError, match="Length of value"):
df.insert(0, "AnotherColumn", range(len(df.index) - 1))
# insert same dtype
df["foo2"] = 3
expected = DataFrame(
[[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# set (non-dup)
df["foo2"] = 4
expected = DataFrame(
[[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
df["foo2"] = 3
# delete (non dup)
del df["bar"]
expected = DataFrame(
[[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],
columns=["foo", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# try to delete again (its not consolidated)
del df["hello"]
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# insert
df.insert(2, "new_col", 5.0)
expected = DataFrame(
[[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]],
columns=["foo", "foo", "new_col", "string", "foo2"],
)
check(df, expected)
# insert a dup
with pytest.raises(ValueError, match="cannot insert"):
df.insert(2, "new_col", 4.0)
df.insert(2, "new_col", 4.0, allow_duplicates=True)
expected = DataFrame(
[
[1, 1, 4.0, 5.0, "bah", 3],
[1, 2, 4.0, 5.0, "bah", 3],
[2, 3, 4.0, 5.0, "bah", 3],
],
columns=["foo", "foo", "new_col", "new_col", "string", "foo2"],
)
check(df, expected)
# delete (dup)
del df["foo"]
expected = DataFrame(
[[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]],
columns=["new_col", "new_col", "string", "foo2"],
)
assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame(
[[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]],
columns=["foo", "bar", "foo", "hello"],
)
check(df)
df["foo2"] = 7.0
expected = DataFrame(
[[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
result = df["foo"]
expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"])
check(result, expected)
# multiple replacements
df["foo"] = "string"
expected = DataFrame(
[
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
del df["foo"]
expected = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"]
)
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]},
index=MultiIndex.from_tuples(
[(600809, 20130331)], names=["STK_ID", "RPT_Date"]
),
)
df5 = DataFrame(
{
"RPT_Date": [20120930, 20121231, 20130331],
"STK_ID": [600809] * 3,
"STK_Name": ["饡驦", "饡驦", "饡驦"],
"TClose": [38.05, 41.66, 30.01],
},
index=MultiIndex.from_tuples(
[(600809, 20120930), (600809, 20121231), (600809, 20130331)],
names=["STK_ID", "RPT_Date"],
),
)
k = pd.merge(df4, df5, how="inner", left_index=True, right_index=True)
result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"})
str(result)
result.dtypes
expected = DataFrame(
[[0.0454, 22.02, 0.0422, 20130331, 600809, "饡驦", 30.01]],
columns=[
"RT",
"TClose",
"TExg",
"RPT_Date",
"STK_ID",
"STK_Name",
"QT_Close",
],
).set_index(["STK_ID", "RPT_Date"], drop=False)
assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar"])
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar", "foo"])
# drop
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
result = df.drop(["a"], axis=1)
expected = DataFrame([[1], [1], [1]], columns=["bar"])
check(result, expected)
result = df.drop("a", axis=1)
check(result, expected)
# describe
df = DataFrame(
[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=["bar", "a", "a"],
dtype="float64",
)
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "A"],
)
for index in [df.index, pd.Index(list("edcba"))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame(
{"A": expected_ser, "B": this_df["B"], "A": expected_ser},
columns=["A", "B", "A"],
)
this_df["A"] = index
check(this_df, expected_df)
# operations
for op in ["__add__", "__mul__", "__sub__", "__truediv__"]:
df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
expected = getattr(df, op)(df)
expected.columns = ["A", "A"]
df.columns = ["A", "A"]
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=["that", "that"])
expected = DataFrame(1.0, index=range(5), columns=["that", "that"])
df["that"] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=["that", "that"])
expected = DataFrame(1, index=range(5), columns=["that", "that"])
df["that"] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop("C", axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=["A", "B", "C"], how="all")
expected.columns = ["A", "A", "B", "C"]
df.columns = ["A", "A", "B", "C"]
result = df.dropna(subset=["A", "C"], how="all")
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ["A", "A", "C", "D"]
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df.C > 6]
check(result, expected)
# where
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df > 6]
check(result, expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df[df.A > 6]
# dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"])
# not-comparing like-labelled
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 == df2
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame(
[[False, True], [True, False], [False, False], [True, False]],
columns=["A", "A"],
)
assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame(
{
"one": Series([True, True, False], index=["a", "b", "c"]),
"two": Series([False, False, True, False], index=["a", "b", "c", "d"]),
"three": Series([False, True, True, True], index=["a", "b", "c", "d"]),
}
)
expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1)
result = dfbool[["one", "three", "one"]]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]].copy()
expected = z.loc[["a", "c", "a"]]
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]]
result = z.loc[["a", "c", "a"]]
check(result, expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame(
{"A": np.arange(5, dtype="int64"), "B": np.arange(1, 6, dtype="int64")},
index=[2, 2, 3, 3, 4],
)
result = df.B - df.A
expected = Series(1, index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
df = DataFrame(
{
"A": date_range("20130101", periods=5),
"B": date_range("20130101 09:00:00", periods=5),
},
index=[2, 2, 3, 3, 4],
)
result = df.B - df.A
expected = Series(pd.Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["a", "a.1"]
str(df)
expected = DataFrame([[1, 2]], columns=["a", "a.1"])
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"])
df.columns = ["b", "a", "a.1"]
str(df)
expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["b", "b"]
str(df)
expected = DataFrame([[1, 2]], columns=["b", "b"])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]],
columns=["a", "a", "b", "b", "d", "c", "c"],
)
df.columns = list("ABCDEFG")
str(df)
expected = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG")
)
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"])
df.columns = ["a", "a.1", "a.2", "a.3"]
str(df)
expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"])
assert_frame_equal(df, expected)
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype="float64")
df_int = DataFrame(np.random.randn(10, 3), dtype="int64")
df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns)
df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns)
df_dt = DataFrame(
pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns
)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._data._blknos) == len(df.columns)
assert len(df._data._blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.0], [2, -2, 3.0]]
rs = DataFrame(vals, columns=["A", "A", "B"])
xp = DataFrame(vals)
xp.columns = ["A", "A", "B"]
assert_frame_equal(rs, xp)
def test_values_duplicates(self):
df = DataFrame(
[[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"]
)
result = df.values
expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list("AAA")
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
def test_insert_with_columns_dups(self):
# GH 14291
df = pd.DataFrame()
df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True)
df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True)
df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True)
exp = pd.DataFrame(
[["a", "d", "g"], ["b", "e", "h"], ["c", "f", "i"]], columns=["A", "A", "A"]
)
assert_frame_equal(df, exp)
| apache-2.0 |
jjbrophy47/sn_spam | independent/scripts/independent.py | 1 | 6452 | """
Module containing the Independent class to handle all operations pertaining
to the independent model.
"""
import os
import pandas as pd
class Independent:
"""Returns an Independent object that reads in the data, splits into sets,
trains and classifies, and writes the results."""
def __init__(self, config_obj, classification_obj, util_obj):
"""Initializes object dependencies for this class."""
self.config_obj = config_obj
"""Configuration object with user settings."""
self.classification_obj = classification_obj
"""Object that handles classification of the data."""
self.util_obj = util_obj
"""Class containing general utility methods."""
# public
def main(self):
"""Main method that reads in the comments, splits them into train and
test, writes them to files, and prints out stats.
Returns the train and test comment dataframes."""
modified = self.config_obj.modified
self.util_obj.start()
data_f, fold_f, status_f = self.file_folders()
sw = self.open_status_writer(status_f)
coms_filename = self.util_obj.get_comments_filename(modified)
coms_df = self.read_file(data_f + coms_filename, sw)
train_df, val_df, test_df = self.split_coms(coms_df)
if self.config_obj.alter_user_ids:
self.alter_user_ids(coms_df, test_df)
self.write_folds(val_df, test_df, fold_f)
self.print_subsets(train_df, val_df, test_df, fw=sw)
self.util_obj.start('\nvalidation set:\n', fw=sw)
self.classification_obj.main(train_df, val_df, dset='val', fw=sw)
self.util_obj.end('time: ', fw=sw)
self.util_obj.start('\ntest set:\n', fw=sw)
all_train_df = train_df.copy()
if self.config_obj.super_train:
all_train_df = pd.concat([train_df, val_df])
self.classification_obj.main(all_train_df, test_df, dset='test', fw=sw)
self.util_obj.end('time: ', fw=sw)
self.util_obj.end('total independent model time: ', fw=sw)
self.util_obj.close_writer(sw)
return val_df, test_df
# private
def file_folders(self):
"""Returns absolute paths for various directories."""
ind_dir = self.config_obj.ind_dir
domain = self.config_obj.domain
data_f = ind_dir + 'data/' + domain + '/'
fold_f = ind_dir + 'data/' + domain + '/folds/'
status_f = ind_dir + 'output/' + domain + '/status/'
if not os.path.exists(fold_f):
os.makedirs(fold_f)
if not os.path.exists(status_f):
os.makedirs(status_f)
return data_f, fold_f, status_f
def open_status_writer(self, status_f):
"""Opens a file to write updates of the independent model.
status_f: status folder.
Returns file object to write to."""
fold = self.config_obj.fold
fname = status_f + 'ind_' + fold + '.txt'
f = self.util_obj.open_writer(fname)
return f
def read_file(self, filename, fw=None):
"""Reads the appropriate comments file of the domain.
filename: csv comments file.
Returns comments dataframe up to the end marker in the config."""
self.util_obj.start('loading data...', fw=fw)
coms_df = pd.read_csv(filename, lineterminator='\n',
nrows=self.config_obj.end)
self.util_obj.end(fw=fw)
return coms_df
def split_coms(self, coms_df):
"""Splits the comments into training, validation, and test sets.
coms_df: comments dataframe.
Returns train, val, and test dataframes."""
start = self.config_obj.start
train_size = self.config_obj.train_size
val_size = self.config_obj.val_size
coms_df = coms_df[start:]
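        # chronological split: the first `train_size` fraction of the remaining
        # comments becomes training data, the next `val_size` fraction the
        # validation set, and everything after that the test set.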
num_coms = len(coms_df)
split_ndx1 = int(num_coms * train_size)
split_ndx2 = split_ndx1 + int(num_coms * val_size)
train_df = coms_df[:split_ndx1]
val_df = coms_df[split_ndx1:split_ndx2]
test_df = coms_df[split_ndx2:]
return train_df, val_df, test_df
def alter_user_ids(self, coms_df, test_df):
"""Alters the user ids in the test set so that all spam messages
are posted by a different user.
test_df: test set dataframe.
Returns altered test set with different user ids for each spammer."""
max_user_id = coms_df['user_id'].max() + 1
user_ids = list(zip(test_df['label'], test_df['user_id']))
new_user_ids = []
for label, user_id in user_ids:
new_user_ids.append(max_user_id if label == 1 else user_id)
max_user_id += 1
test_df['user_id'] = new_user_ids
def write_folds(self, val_df, test_df, fold_f):
"""Writes validation and test set dataframes to csv files.
val_df: dataframe with validation set comments.
test_df: dataframe with test set comments.
fold_f: folder to save the data to."""
fold = self.config_obj.fold
val_fname = fold_f + 'val_' + fold + '.csv'
test_fname = fold_f + 'test_' + fold + '.csv'
val_df.to_csv(val_fname, line_terminator='\n', index=None)
test_df.to_csv(test_fname, line_terminator='\n', index=None)
def print_subsets(self, train_df, val_df, test_df, fw=None):
"""Writes basic statistics about the training and test sets.
train_df: training set comments.
test_df: test set comments."""
spam, total = len(train_df[train_df['label'] == 1]), len(train_df)
percentage = round(self.util_obj.div0(spam, total) * 100, 1)
s = '\ttraining set size: ' + str(len(train_df)) + ', '
s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
self.util_obj.write(s, fw=fw)
spam, total = len(val_df[val_df['label'] == 1]), len(val_df)
percentage = round(self.util_obj.div0(spam, total) * 100, 1)
s = '\tvalidation set size: ' + str(len(val_df)) + ', '
s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
self.util_obj.write(s, fw=fw)
spam, total = len(test_df[test_df['label'] == 1]), len(test_df)
percentage = round(self.util_obj.div0(spam, total) * 100, 1)
s = '\ttest set size: ' + str(len(test_df)) + ', '
s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
self.util_obj.write(s, fw=fw)
| mit |
robwarm/gpaw-symm | doc/devel/bigpicture.py | 1 | 9152 | """creates: bigpicture.svg bigpicture.png"""
import os
from math import pi, cos, sin
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
class Box:
def __init__(self, name, description=(), attributes=(), color='grey'):
self.name = name
if isinstance(description, str):
description = [description]
self.description = description
self.attributes = attributes
self.color = color
self.owns = []
self.position = None
def set_position(self, position):
self.position = np.asarray(position)
def has(self, other, name, angle=None, distance=None, x=0.4, style='<-'):
self.owns.append((other, name, x, style))
if angle is not None:
angle *= pi / 180
other.set_position(self.position +
[cos(angle) * distance, sin(angle) * distance])
def cut(size, dir):
if abs(size[0] * dir[1]) < abs(size[1] * dir[0]):
x = min(max(-size[0] / 2, dir[0]), size[0] / 2)
y = x * dir[1] / dir[0]
else:
y = min(max(-size[1] / 2, dir[1]), size[1] / 2)
x = y * dir[0] / dir[1]
return x, y
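# cut() returns the offset from a box centre at which a ray in direction `dir`
# meets the box outline (size = (width, height)); MPL.on_draw uses it so that
# connector arrows start and end on box edges rather than at their centres.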
class MPL:
def __init__(self, boxes):
self.boxes = boxes
def plot(self):
a4 = 100 * np.array([2**-1.75, 2**-2.25])
inch = 2.54
self.fig = plt.figure(1, a4 / inch)
self.ax = ax = self.fig.add_axes([0, 0, 1, 1], frameon=False)
ax.set_xlim(0, a4[0])
ax.set_ylim(0, a4[1])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.add_patch(mpatches.Rectangle((22.5, 16), 6, 4, fc='orange'))
ax.text(22.7, 19.5, 'ASE package')
for b in boxes:
x, y = b.position
text = b.name
for txt in b.description:
text += '\n' + txt
for txt in b.attributes:
text += '\n' + txt
b.text = ax.text(x, y,
text,
fontsize=9,
ha='center',
va='center',
bbox=dict(boxstyle='round',
facecolor=b.color,
alpha=0.75))
self.fig.canvas.mpl_connect('draw_event', self.on_draw)
plt.savefig('bigpicture.png', dpi=50)
plt.savefig('bigpicture.svg')
os.system('cp bigpicture.svg ../_build')
def on_draw(self, event):
for b in self.boxes:
bbox = b.text.get_window_extent()
t = b.text.get_transform()
b.size = t.inverted().transform(bbox.size)
for b in self.boxes:
for other, name, s, style in b.owns:
d = other.position - b.position
p1 = b.position + cut(b.size, d)
p2 = other.position + cut(other.size, -d)
if style == '-|>':
arrowprops = dict(arrowstyle=style, fc='white')
else:
arrowprops = dict(arrowstyle=style)
self.ax.annotate('', p1, p2,
arrowprops=arrowprops)
if name:
p = (1 - s) * p1 + s * p2
self.ax.text(p[0], p[1], name, fontsize=7,
ha='center', va='center',
bbox=dict(facecolor='white', ec='white'))
self.fig.canvas.callbacks.callbacks[event.name] = {}
self.fig.canvas.draw()
return False
boxes = []
def box(*args, **kwargs):
b = Box(*args, **kwargs)
boxes.append(b)
return b
atoms = box('Atoms', [''], ['positions, numbers, cell, pbc'],
color='white')
paw = box('PAW', [], [], 'green')
scf = box('SCFLoop', [])
density = box('Density',
[r'$\tilde{n}_\sigma = \sum_{\mathbf{k}n}' +
r'|\tilde{\psi}_{\sigma\mathbf{k}n}|^2' +
r'+\frac{1}{2}\sum_a \tilde{n}_c^a$',
r'$\tilde{\rho}(\mathbf{r}) = ' +
r'\sum_\sigma\tilde{n}_\sigma + \sum_{aL}Q_L^a \hat{g}_L^a$'],
['nspins, nt_sG, nt_sg,', 'rhot_g, Q_aL, D_asp'])
mixer = box('Mixer')#, color='blue')
hamiltonian = box('Hamiltonian',
[r'$-\frac{1}{2}\nabla^2 + \tilde{v} + ' +
r'\sum_a \sum_{i_1i_2} |\tilde{p}_{i_1}^a \rangle ' +
r'\Delta H_{i_1i_2} \langle \tilde{p}_{i_2}^a|$'],
['nspins, vt_sG, vt_sg, vHt_g, dH_asp',
'Etot, Ekin, Exc, Epot, Ebar'])
wfs = box('WaveFunctions',
[r'$\tilde{\psi}_{\sigma\mathbf{k}n}(\mathbf{r})$'],
['nspins, ibzk_qc, mynbands',
'kpt_comm, band_comm'], color='magenta')
gd = box('GridDescriptor', ['(coarse grid)'],
['cell_cv, N_c,', 'pbc_c, dv, comm'], 'orange')
finegd = box('GridDescriptor', '(fine grid)',
['cell_cv, N_c, pbc_c, dv, comm'], 'orange')
rgd = box('RadialGridDescriptor', [], ['r_g, dr_g, rcut'], color='orange')
setups = box('Setups', ['', '', '', ''], ['nvalence, nao, Eref, corecharge'])
xccorrection = box('XCCorrection')
nct = box('LFC', r'$\tilde{n}_c^a(r)$', [], 'red')
vbar = box('LFC', r'$\bar{v}^a(r)$', [], 'red')
ghat = box('LFC', r'$\hat{g}_{\ell m}^a(\mathbf{r})$', [], 'red')
fd = box('FDWaveFunctions',
r"""$\tilde{\psi}_{\sigma\mathbf{k}n}(ih,jh,kh)$""",
[], 'magenta')
pt = box('LFC', r'$\tilde{p}_i^a(\mathbf{r})$', [], 'red')
lcao = box('LCAOWaveFunctions',
r"$\tilde{\psi}_{\sigma\mathbf{k}n}(\mathbf{r})=\sum_{\mu\mathbf{R}} C_{\sigma\mathbf{k}n\mu} \Phi_\mu(\mathbf{r} - \mathbf{R}) \exp(i\mathbf{k}\cdot\mathbf{R})$",
['S_qMM, T_qMM, P_aqMi'], 'magenta')
atoms0 = box('Atoms', '(copy)', ['positions, numbers, cell, pbc'],
color='grey')
parameters = box('InputParameters', [], ['xc, nbands, ...'])
forces = box('ForceCalculator')
occupations = box(
'OccupationNumbers',
r'$\epsilon_{\sigma\mathbf{k}n} \rightarrow f_{\sigma\mathbf{k}n}$')
poisson = box('PoissonSolver',
r'$\nabla^2 \tilde{v}_H(\mathbf{r}) = -4\pi \tilde{\rho}(\mathbf{r})$')
eigensolver = box('EigenSolver')
symmetry = box('Symmetry')
restrictor = box('Transformer', '(fine -> coarse)',
color='yellow')
interpolator = box('Transformer', '(coarse -> fine)',
color='yellow')
xc = box('XCFunctional')
kin = box('FDOperator', r'$-\frac{1}{2}\nabla^2$')
hsoperator = box('HSOperator',
[r"$\langle \psi_n | A | \psi_{n'} \rangle$",
r"$\sum_{n'}U_{nn'}|\tilde{\psi}_{n'}\rangle$"])
overlap = box('Overlap')
basisfunctions = box('BasisFunctions', r'$\Phi_\mu(\mathbf{r})$',
color='red')
tci = box('TwoCenterIntegrals',
r'$\langle\Phi_\mu|\Phi_\nu\rangle,'
r'\langle\Phi_\mu|\hat{T}|\Phi_\nu\rangle,'
r'\langle\tilde{p}^a_i|\Phi_\mu\rangle$')
atoms.set_position((25, 18.3))
atoms.has(paw, 'calculator', -160, 7.5)
paw.has(scf, 'scf', 160, 4, x=0.48)
paw.has(density, 'density', -150, 14, 0.23)
paw.has(hamiltonian, 'hamiltonian', 180, 10, 0.3)
paw.has(wfs, 'wfs', -65, 5.5, x=0.48)
paw.has(atoms0, 'atoms', 9, 7.5)
paw.has(parameters, 'input_parameters', 90, 4)
paw.has(forces, 'forces', 50, 4)
paw.has(occupations, 'occupations', 136, 4)
density.has(mixer, 'mixer', 130, 3.3)
density.has(gd, 'gd', x=0.33)
density.has(finegd, 'finegd', 76, 3.5)
density.has(setups, 'setups', 0, 7, 0.45)
density.has(nct, 'nct', -90, 3)
density.has(ghat, 'ghat', -130, 3.4)
density.has(interpolator, 'interpolator', -45, 4)
hamiltonian.has(restrictor, 'restrictor', 40, 4)
hamiltonian.has(xc, 'xc', 160, 6, x=0.6)
hamiltonian.has(vbar, 'vbar', 80, 4)
hamiltonian.has(setups, 'setups', x=0.3)
hamiltonian.has(gd, 'gd', x=0.45)
hamiltonian.has(finegd, 'finegd')
hamiltonian.has(poisson, 'poissonsolver', 130, 4)
wfs.has(gd, 'gd', 160, 4.8, x=0.48)
wfs.has(setups, 'setups', x=0.4)
wfs.has(lcao, None, -55, 5.9, style='-|>')
wfs.has(fd, None, -112, 5.0, style='-|>')
wfs.has(eigensolver, 'eigensolver', 30, 5, x=0.6)
wfs.has(symmetry, 'symmetry', 80, 3)
fd.has(pt, 'pt', -45, 3.6)
fd.has(kin, 'kin', -90, 3)
fd.has(overlap, 'overlap', -135, 3.5)
lcao.has(basisfunctions, 'basis_functions', -50, 3.5)
lcao.has(tci, 'tci', -90, 4.2)
overlap.has(setups, 'setups', x=0.4)
overlap.has(hsoperator, 'operator', -115, 2.5, x=0.41)
for i in range(3):
setup = box('Setup', [],
['Z, Nv, Nc, pt_j, nct,', 'vbar, ghat_l, Delta_pl'],
'blue')
setup.set_position(setups.position +
(0.9 - i * 0.14, 0.3 - i * 0.14))
setup.has(xccorrection, 'xc_correction', -110, 3.7)
xccorrection.has(rgd, 'rgd', -105, 2.4, 0.4)
kpts = [box('KPoint', [], ['psit_nG, C_nM,', 'eps_n, f_n, P_ani'],
color='cyan') for i in range(3)]
wfs.has(kpts[1], 'kpt_u', 0, 5.4, 0.48)
kpts[0].set_position(kpts[1].position - 0.14)
kpts[2].set_position(kpts[1].position + 0.14)
MPL(boxes).plot()
| gpl-3.0 |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/networkx/tests/test_convert_pandas.py | 43 | 2177 | from nose import SkipTest
from nose.tools import assert_true
import networkx as nx
class TestConvertPandas(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
try:
import pandas as pd
except ImportError:
raise SkipTest('Pandas not available.')
def __init__(self, ):
global pd
import pandas as pd
self.r = pd.np.random.RandomState(seed=5)
ints = self.r.random_integers(1, 10, size=(3,2))
a = ['A', 'B', 'C']
b = ['D', 'A', 'E']
df = pd.DataFrame(ints, columns=['weight', 'cost'])
df[0] = a # Column label 0 (int)
df['b'] = b # Column label 'b' (str)
self.df = df
def assert_equal(self, G1, G2):
assert_true( nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y ))
def test_from_dataframe_all_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', True)
self.assert_equal(G, Gtrue)
def test_from_dataframe_multi_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost'])
self.assert_equal(G, Gtrue)
def test_from_dataframe_one_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'weight': 10}),
('B', 'A', {'weight': 7}),
('A', 'D', {'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', 'weight')
self.assert_equal(G, Gtrue)
def test_from_dataframe_no_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {}),
('B', 'A', {}),
('A', 'D', {})])
G=nx.from_pandas_dataframe(self.df, 0, 'b',)
self.assert_equal(G, Gtrue)
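# Usage sketch (illustrative only; mirrors what the tests above exercise):
#
#     import pandas as pd
#     import networkx as nx
#     df = pd.DataFrame({'source': ['A', 'B'], 'target': ['B', 'C'],
#                        'weight': [3, 7]})
#     G = nx.from_pandas_dataframe(df, 'source', 'target', 'weight')
#     assert G['A']['B']['weight'] == 3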
| bsd-2-clause |
Jeff20/sklearn_pycon2015 | notebooks/fig_code/svm_gui.py | 47 | 11549 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm, mainly intended for didactic
purposes. You can create data points by pointing and clicking and visualize
the decision region induced by different kernels and parameter settings.
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', self.onkeypress)
canvas.mpl_connect('key_release_event', self.onkeyrelease)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.shift_down = False
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onkeypress(self, event):
if event.key == "shift":
self.shift_down = True
def onkeyrelease(self, event):
if event.key == "shift":
self.shift_down = False
def onclick(self, event):
if event.xdata and event.ydata:
if self.shift_down or event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
elif event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
joshloyal/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is
completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-test statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
evgchz/scikit-learn | sklearn/ensemble/gradient_boosting.py | 6 | 63474 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from warnings import warn
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils.extmath import logsumexp
from ..utils.stats import _weighted_percentile
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
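# Worked example for LogOddsEstimator: with 75 positive and 25 negative
# training samples (and unit scale) the prior is log(75 / 25) = log(3),
# roughly 1.0986, i.e. the log odds of the positive class.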
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float)
class_counts = np.bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
        y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : np.ndarray, shape=(n, m)
The data array.
y : np.ndarray, shape=(n,)
The target labels.
residual : np.ndarray, shape=(n,)
The residuals (usually the negative gradient).
        y_pred : np.ndarray, shape=(n,)
            The predictions.
        sample_weight : np.ndarray, shape=(n,)
The weight of each sample.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * np.abs(y - pred.ravel()))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
See
---
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
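# Worked form of the Huber loss evaluated above: for a residual d = y - pred
# and threshold gamma (the alpha-quantile of |d|),
#     loss(d) = 0.5 * d ** 2                   if |d| <= gamma
#     loss(d) = gamma * (|d| - gamma / 2.0)    otherwise
# which is exactly what the quadratic and linear branches in __call__ sum
# per sample before normalising.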
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                    (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
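# Worked form of the quantile (pinball) loss evaluated above: for a residual
# d = y - pred and quantile level alpha,
#     loss(d) = alpha * d           if d > 0
#     loss(d) = (alpha - 1.0) * d   otherwise
# so the negative gradient is alpha for under-predictions and -(1 - alpha)
# for over-predictions, matching negative_gradient() above.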
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - 1.0 / (1.0 + np.exp(-pred.ravel()))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
        Our node estimate is given by:
            sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        We take advantage of the fact that y - prob = residual.
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
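# Sketch of the score-to-probability mapping implemented above: for raw
# scores [-2.0, 0.0, 2.0] the positive-class column is
# 1.0 / (1.0 + np.exp(-score)), roughly [0.119, 0.5, 0.881], and the first
# column is its complement.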
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
            return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
                                           logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
See
---
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
    each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features)
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
if y.shape[0] != n_samples:
raise ValueError('Shape mismatch of X and y: %d != %d' %
(n_samples, y.shape[0]))
if n_samples != sample_weight.shape[0]:
raise ValueError('Shape mismatch of sample_weight: %d != %d' %
(sample_weight.shape[0], n_samples))
self.n_features = n_features
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
self.min_weight_fraction_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (old_oob_score -
loss_(y[~sample_mask], y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, call `fit` "
"before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test
        set) after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, verbose=verbose, max_leaf_nodes=max_leaf_nodes,
warm_start=warm_start)
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
y = column_or_1d(y, warn=True)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return super(GradientBoostingClassifier, self).fit(X, y, sample_weight,
monitor)
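    # Illustrative sketch of the ``monitor`` callback documented above (the
    # callback name and the stage cap are arbitrary, and X/y stand in for
    # training arrays; none of this is part of the API itself):
    #
    #     def stop_early(i, est, local_vars):
    #         # returning True halts boosting; here we simply cap the number
    #         # of fitted stages at 50 regardless of ``n_estimators``
    #         return i >= 49
    #
    #     clf = GradientBoostingClassifier(n_estimators=500, random_state=0)
    #     clf.fit(X, y, monitor=stop_early)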
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining the error on a test
        set) after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples at each
            stage.
"""
try:
for score in self.staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
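    # Illustrative sketch (not part of the estimator): the staged probabilities
    # can be used to trace a held-out metric across boosting iterations. Here
    # ``clf``, ``X_test`` and ``y_test`` are placeholders for a fitted model
    # and a pre-split validation set, and sklearn.metrics.log_loss is assumed:
    #
    #     from sklearn.metrics import log_loss
    #     test_curve = [log_loss(y_test, proba)
    #                   for proba in clf.staged_predict_proba(X_test)]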
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict classes at each stage for X.
        This method allows monitoring (i.e. determining the error on a test
        set) after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples]
            The predicted classes of the input samples at each stage.
"""
for score in self.staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha, verbose, max_leaf_nodes=max_leaf_nodes,
warm_start=warm_start)
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
self.n_classes_ = 1
return super(GradientBoostingRegressor, self).fit(X, y, sample_weight,
monitor)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
return self.decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining the error on a test
        set) after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples]
            The predicted values of the input samples at each stage.
"""
for y in self.staged_decision_function(X):
yield y.ravel()
| bsd-3-clause |
cristiandima/highlights | highlights/extractive/erank.py | 1 | 3576 | """
This is in many ways identical to the textrank algorithm. The only difference
is that we expand the sentence graph to also include the title of the text,
the topics associated with the text, and the named entities present.
The output is still an importance score for each sentence in the original text,
but these new nodes offer extra information and increase the weights of those
sentences which are more closely related to the topics/title/named entities
associated with the text.
"""
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from highlights.extractive.textrank import _textrank_scores
from highlights.internals.helpers import summary_length, NLP
_word_tokenize = TfidfVectorizer(stop_words='english').build_analyzer()
def _get_named_entities(nlp_doc):
""" Given a spacy document return the top ten most frequent name entities
present in the text. Name entities appearing only once are skipped.
Args:
nlp_doc (spacy document): document to extract named entities from
Returns:
a list of words, the most frequent named entities present in the document
"""
ignored_ents = {'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'}
ne = [n.text for n in nlp_doc.ents if n.label_ not in ignored_ents]
ne = [n.replace('the', '').strip() for n in ne]
ne = set(ne)
counter = CountVectorizer(ngram_range=(1,2))
counts = counter.fit_transform([nlp_doc.text])
ne_scores = []
for entity in ne:
entity = entity.lower()
if entity in counter.vocabulary_:
ne_scores.append((counts[0, counter.vocabulary_.get(entity)], entity))
ne_scores = sorted([n for n in ne_scores if n[0] != 1], reverse=True)[:10]
return [n[1] for n in ne_scores]
def _get_topics(nlp_doc, lda, word_dict, topic_terms):
""" Given a spacy document, as well as an lda model, this function returns
a list of lists where each list holds the string words associated with each
topic associated with the document
"""
doc_bow = word_dict.doc2bow(_word_tokenize(nlp_doc.text))
topics = lda.get_document_topics(doc_bow)
topics_as_words = []
for topic_tuple in topics:
topic_words = []
for word_tuple in topic_terms[topic_tuple[0]]:
topic_words.append(word_dict[word_tuple[0]])
topics_as_words.append(topic_words)
return topics_as_words
def _erank_scores(nlp_doc, topics, named_entities, title=None):
sentences = [sent.text for sent in nlp_doc.sents]
original_len = len(sentences)
for topic_words in topics:
sentences.append(' '.join(topic_words))
if len(named_entities) >= 1:
sentences.append(' '.join(named_entities))
if title is not None:
sentences.append(' '.join(_word_tokenize(title)))
scores = _textrank_scores(sentences)
scores = {i: scores.get(i, 0) for i in range(original_len)}
return scores
def erank(text, lda, word_dict, topic_terms, title=None, len_func=summary_length):
nlp_doc = NLP(text)
sentences = [sent.text for sent in nlp_doc.sents]
topics = _get_topics(nlp_doc, lda, word_dict, topic_terms)
named_entities = _get_named_entities(nlp_doc)
scores = _erank_scores(nlp_doc, topics, named_entities, title)
sum_len = len_func(len(scores))
sent_scores = [(scores[i], s) for i, s in enumerate(sentences)]
top_sentences = sorted(sent_scores, reverse=True)[:sum_len]
return [s[1] for s in top_sentences]
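# Example usage sketch (illustrative only): erank needs a trained gensim-style
# LDA model, its dictionary, and per-topic terms. The corpus/article variables
# below are placeholders, and gensim itself is an assumed dependency.
#
#     from gensim.corpora import Dictionary
#     from gensim.models import LdaModel
#
#     tokenized = [_word_tokenize(text) for text in corpus_texts]
#     word_dict = Dictionary(tokenized)
#     lda = LdaModel([word_dict.doc2bow(doc) for doc in tokenized],
#                    id2word=word_dict, num_topics=20)
#     topic_terms = {i: lda.get_topic_terms(i) for i in range(lda.num_topics)}
#     summary = erank(article_text, lda, word_dict, topic_terms,
#                     title=article_title)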
| mit |
glemaitre/UnbalancedDataset | imblearn/ensemble/tests/test_classifier.py | 2 | 17981 | """Test the module ensemble classifiers."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.datasets import load_iris, make_hastie_10_2
from sklearn.model_selection import (GridSearchCV, ParameterGrid,
train_test_split)
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectKBest
from sklearn.utils.testing import (assert_array_equal,
assert_array_almost_equal,
assert_raises,
assert_warns,
assert_warns_message)
from imblearn.datasets import make_imbalance
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import RandomUnderSampler
iris = load_iris()
def test_balanced_bagging_classifier():
# Check classification for various parameter settings.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BalancedBaggingClassifier(
base_estimator=base_estimator,
random_state=0,
**params).fit(X_train, y_train).predict(X_test)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
base_estimator = DecisionTreeClassifier().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
# disable the resampling by passing an empty dictionary.
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_samples=1.0,
bootstrap=False,
n_estimators=10,
ratio={},
random_state=0).fit(X_train, y_train)
assert (ensemble.score(X_train, y_train) ==
base_estimator.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_samples=1.0,
bootstrap=True,
random_state=0).fit(X_train, y_train)
assert (ensemble.score(X_train, y_train) <
base_estimator.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_features=1.0,
bootstrap_features=False,
random_state=0).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert np.unique(features).shape[0] == X.shape[1]
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_features=1.0,
bootstrap_features=True,
random_state=0).fit(X_train, y_train)
unique_features = [np.unique(features).shape[0]
for features in ensemble.estimators_features_]
assert np.median(unique_features) < X.shape[1]
def test_probability():
# Predict probabilities.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
random_state=0).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BalancedBaggingClassifier(
base_estimator=LogisticRegression(),
random_state=0,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BalancedBaggingClassifier(
base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=0).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert abs(test_score - clf.oob_score_) < 0.1
# Test with few estimators
assert_warns(UserWarning,
BalancedBaggingClassifier(
base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=0).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
clf1 = BalancedBaggingClassifier(
base_estimator=KNeighborsClassifier(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=0).fit(X_train, y_train)
clf2 = make_pipeline(RandomUnderSampler(
random_state=clf1.estimators_[0].steps[0][1].random_state),
KNeighborsClassifier()).fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50})
base = DecisionTreeClassifier()
# Test n_estimators
assert_raises(ValueError,
BalancedBaggingClassifier(base, n_estimators=1.5).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, n_estimators=-1).fit, X, y)
# Test max_samples
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples="foobar").fit,
X, y)
# Test max_features
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features="foobar").fit,
X, y)
# Test support of decision_function
assert not (hasattr(BalancedBaggingClassifier(base).fit(X, y),
'decision_function'))
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target.copy()
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BalancedBaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
ensemble = BalancedBaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
DecisionTreeClassifier)
ensemble = BalancedBaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
DecisionTreeClassifier)
ensemble = BalancedBaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
Perceptron)
def test_bagging_with_pipeline():
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
estimator = BalancedBaggingClassifier(
make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(X, y).predict(X)
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BalancedBaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert len(clf_ws) == n_estimators
clf_no_ws = BalancedBaggingClassifier(n_estimators=10,
random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert (set([pipe.steps[-1][1].random_state for pipe in clf_ws]) ==
set([pipe.steps[-1][1].random_state for pipe in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with a smaller n_estimators
    # raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators"
" does not", clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BalancedBaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BalancedBaggingClassifier(KNeighborsClassifier(),
max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_
# FIXME: uncomment when #9723 is merged in scikit-learn
# def test_estimators_samples():
# # Check that format of estimators_samples_ is correct and that results
# # generated at fit time can be identically reproduced at a later time
# # using data saved in object attributes.
# X, y = make_hastie_10_2(n_samples=200, random_state=1)
# # remap the y outside of the BalancedBaggingclassifier
# # _, y = np.unique(y, return_inverse=True)
# bagging = BalancedBaggingClassifier(LogisticRegression(),
# max_samples=0.5,
# max_features=0.5, random_state=1,
# bootstrap=False)
# bagging.fit(X, y)
# # Get relevant attributes
# estimators_samples = bagging.estimators_samples_
# estimators_features = bagging.estimators_features_
# estimators = bagging.estimators_
# # Test for correct formatting
# assert len(estimators_samples) == len(estimators)
# assert len(estimators_samples[0]) == len(X)
# assert estimators_samples[0].dtype.kind == 'b'
# # Re-fit single estimator to test for consistent sampling
# estimator_index = 0
# estimator_samples = estimators_samples[estimator_index]
# estimator_features = estimators_features[estimator_index]
# estimator = estimators[estimator_index]
# X_train = (X[estimator_samples])[:, estimator_features]
# y_train = y[estimator_samples]
# orig_coefs = estimator.steps[-1][1].coef_
# estimator.fit(X_train, y_train)
# new_coefs = estimator.steps[-1][1].coef_
# assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BalancedBaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert bagging._max_samples == max_samples
| mit |
markpudd/logistic_regression | logisticReg.py | 1 | 1904 | # Helper functions to do logistic regression
# To train a model, minimise the logRegCost function (e.g. with scipy.optimize.minimize),
# using the logRegGrad function to supply the derivative; see the usage sketch at the end of this file.
#
import cv2
import numpy as np
import scipy.io as sio
import csv as csv
from sklearn.preprocessing import normalize
def featureNormalize(data):
    # Standardise each column to zero mean and unit (sample) standard deviation.
    mu = data.mean(0)
    data_norm = data - mu
    sigma = np.std(data_norm, axis=0, ddof=1)
    data_norm = data_norm / sigma
    return data_norm
def addFirstOnes(data):
return np.concatenate((np.ones((np.size(data,0),1)),data),1)
def sigmoid(z):
return 1/(1+np.exp(-z))
def logRegGrad(theta, data_x, data_y, lamb):
    # Gradient of the regularised cost; the bias term (j = 0) is not penalised.
    m = float(np.size(data_y))
    theta = np.array([theta]).T
    temp = np.array(theta)
    temp[0] = 0
    h = sigmoid(data_x.dot(theta))
    grad = 1 / m * ((h - data_y).T.dot(data_x)).T
    grad = grad + ((lamb / m) * temp)
    return grad.T[0]
def logRegCost(theta, data_x, data_y, lamb):
    # Regularised negative log-likelihood (cross-entropy) cost.
    m = float(np.size(data_y))
    theta = np.array([theta]).T
    h = sigmoid(data_x.dot(theta))
    J = 1 / m * ((-data_y.T.dot(np.log(h)) - (1 - data_y.T).dot(np.log(1 - h))))
    temp = np.array(theta)
    temp[0] = 0  # the bias term (j = 0) is not regularised
    J = J + (lamb / (2 * m)) * sum(np.power(temp, 2))
    return J[0, 0]
def predict(theta, data_x):
n = np.size(data_x,1)
theta=np.array([theta]).T
ha = data_x.dot(theta)
p=sigmoid(ha);
for i in range(0,np.size(data_x,0)):
if p[i]>=0.5:
p[i]=1
else:
p[i]=0
return p
def testError(theta, data_x,data_y):
m = float(np.size(data_y))
sum =0
p=predict(theta, data_x);
for i in range(0,np.size(data_x,0)):
if p[i,0]==1 and data_y[0,i]==0:
sum = sum+1;
elif p[i,0]==0 and data_y[0,i]==1:
sum = sum+1;
return 1/m * sum
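if __name__ == '__main__':
    # Minimal end-to-end sketch (not part of the original helpers; the data
    # below is synthetic and purely illustrative): minimise logRegCost with
    # scipy.optimize.minimize, supplying logRegGrad as the jacobian.
    from scipy.optimize import minimize

    np.random.seed(0)
    raw_x = np.random.randn(200, 2)
    # column vector of 0/1 labels, as expected by logRegCost/logRegGrad
    y_col = (raw_x[:, 0] + raw_x[:, 1] > 0).astype(float).reshape(-1, 1)

    data_x = addFirstOnes(featureNormalize(raw_x))
    theta0 = np.zeros(np.size(data_x, 1))
    res = minimize(logRegCost, theta0, args=(data_x, y_col, 1.0),
                   jac=logRegGrad, method='BFGS')
    # testError indexes labels as data_y[0, i], so pass them as a row vector
    print('training error:', testError(res.x, data_x, y_col.T))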
| mit |
gfyoung/pandas | pandas/tests/indexes/datetimes/test_scalar_compat.py | 2 | 12213 | """
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
"""
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import OutOfBoundsDatetime, to_offset
from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
import pandas as pd
from pandas import DatetimeIndex, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndexOps:
def test_dti_time(self):
rng = date_range("1/1/2000", freq="12min", periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range("1/1/2000", freq="12H", periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
@pytest.mark.parametrize(
"field",
[
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
],
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_isocalendar_fields(self):
idx = tm.makeDateIndex(100)
expected = tuple(idx.isocalendar().iloc[-1].to_list())
result = idx[-1].isocalendar()
assert result == expected
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
# ----------------------------------------------------------------
# DatetimeIndex.round
def test_round_daily(self):
dti = date_range("20130101 09:10:11", periods=5)
result = dti.round("D")
expected = date_range("20130101", periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
result = dti.round("D")
expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
tm.assert_index_equal(result, expected)
result = dti.round("s")
tm.assert_index_equal(result, dti)
@pytest.mark.parametrize(
"freq, error_msg",
[
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_round_invalid(self, freq, error_msg):
dti = date_range("20130101 09:10:11", periods=5)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
def test_round(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 01:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq="H"), expected_rng)
assert elt.round(freq="H") == expected_elt
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq="M")
with pytest.raises(ValueError, match=msg):
elt.round(freq="M")
# GH#14440 & GH#15578
index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
tm.assert_index_equal(index, index.round(freq))
index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz)
tm.assert_index_equal(result, expected)
index = DatetimeIndex(["2016-10-17 12:00:00.001501031"])
result = index.round("10ns")
expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(False):
ts = "2016-10-17 12:00:00.001501031"
DatetimeIndex([ts]).round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:02:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:04:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:06:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:08:00", tz=tz, freq="2T"),
]
)
tm.assert_index_equal(rng.round(freq="2T"), expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
(["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
(["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
(
["2117-01-01 00:00:45.000000012"],
"floor",
"10ns",
["2117-01-01 00:00:45.000000010"],
),
(
["1823-01-01 00:00:01.000000012"],
"ceil",
"10ns",
["1823-01-01 00:00:01.000000020"],
),
(["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
(["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
(["2018-01-01 00:15:00"], "ceil", "15T", ["2018-01-01 00:15:00"]),
(["2018-01-01 00:15:00"], "floor", "15T", ["2018-01-01 00:15:00"]),
(["1823-01-01 03:00:00"], "ceil", "3H", ["1823-01-01 03:00:00"]),
(["1823-01-01 03:00:00"], "floor", "3H", ["1823-01-01 03:00:00"]),
(
("NaT", "1823-01-01 00:00:01"),
"floor",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
(
("NaT", "1823-01-01 00:00:01"),
"ceil",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize(
"start, index_freq, periods",
[("2018-01-01", "12H", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
)
@pytest.mark.parametrize(
"round_freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"12H",
"1D",
],
)
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = date_range(start=start, freq=index_freq, periods=periods)
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"floor not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"ceil not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), f"round not a {round_freq} multiple"
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
result.asi8[diff == unit // 2] % 2 == 0
).all(), "round half to even error"
# ----------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D")
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype(
"datetime64[ns]"
)
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype(
"datetime64[ns]"
)
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_nat(self):
dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")])
result = dti.normalize()
expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")])
tm.assert_index_equal(result, expected)
class TestDateTimeIndexToJulianDate:
def test_1700(self):
dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="H")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="T")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="S")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
| bsd-3-clause |
rs2/pandas | pandas/tests/indexes/test_base.py | 1 | 93051 | from collections import defaultdict
from datetime import datetime, timedelta
from io import StringIO
import math
import operator
import re
import numpy as np
import pytest
import pandas._config.config as cf
from pandas._libs.tslib import Timestamp
from pandas.compat.numpy import np_datetime64_compat
from pandas.util._test_decorators import async_mark
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Float64Index,
Int64Index,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
date_range,
isna,
period_range,
)
import pandas._testing as tm
from pandas.core.indexes.api import (
Index,
MultiIndex,
_get_combined_index,
ensure_index,
ensure_index_from_sequences,
)
from pandas.tests.indexes.common import Base
class TestIndex(Base):
_holder = Index
def create_index(self) -> Index:
return Index(list("abcde"))
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
@pytest.mark.parametrize("index", ["datetime"], indirect=True)
def test_new_axis(self, index):
with tm.assert_produces_warning(FutureWarning):
# GH#30588 multi-dimensional indexing deprecated
new_index = index[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_constructor_regular(self, index):
tm.assert_contains_all(index, index)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_constructor_casting(self, index):
# casting
arr = np.array(index)
new_index = Index(arr)
tm.assert_contains_all(arr, new_index)
tm.assert_index_equal(index, new_index)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_constructor_copy(self, index):
# copy
# index = self.create_index()
arr = np.array(index)
new_index = Index(arr, copy=True, name="name")
assert isinstance(new_index, Index)
assert new_index.name == "name"
tm.assert_numpy_array_equal(arr, new_index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert new_index[0] != "SOMEBIGLONGSTRING"
        # FIXME: don't leave commented-out
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize(
"index",
[
pd.date_range(
"2015-01-01 10:00",
freq="D",
periods=3,
tz="US/Eastern",
name="Green Eggs & Ham",
), # DTI with tz
pd.date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz
pd.timedelta_range("1 days", freq="D", periods=3), # td
pd.period_range("2015-01-01", freq="D", periods=3), # period
],
)
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
result = pd.Index(index.astype(object))
else:
result = pd.Index(index)
tm.assert_index_equal(result, index)
if isinstance(index, pd.DatetimeIndex):
assert result.tz == index.tz
if cast_as_obj:
# GH#23524 check that Index(dti, dtype=object) does not
# incorrectly raise ValueError, and that nanoseconds are not
# dropped
index += pd.Timedelta(nanoseconds=50)
result = pd.Index(index, dtype=object)
assert result.dtype == np.object_
assert list(result) == list(index)
@pytest.mark.parametrize(
"index,has_tz",
[
(
pd.date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
True,
), # datetimetz
(pd.timedelta_range("1 days", freq="D", periods=3), False), # td
(pd.period_range("2015-01-01", freq="D", periods=3), False), # period
],
)
def test_constructor_from_series_dtlike(self, index, has_tz):
result = pd.Index(pd.Series(index))
tm.assert_index_equal(result, index)
if has_tz:
assert result.tz == index.tz
def test_constructor_from_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"]
expected = DatetimeIndex(dts, freq="MS")
s = Series(pd.to_datetime(dts))
result = DatetimeIndex(s, freq="MS")
tm.assert_index_equal(result, expected)
def test_constructor_from_frame_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"]
expected = DatetimeIndex(dts, freq="MS")
df = pd.DataFrame(np.random.rand(5, 3))
df["date"] = dts
result = DatetimeIndex(df["date"], freq="MS")
assert df["date"].dtype == object
expected.name = "date"
tm.assert_index_equal(result, expected)
expected = pd.Series(dts, name="date")
tm.assert_series_equal(df["date"], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df["date"])
assert freq == "MS"
@pytest.mark.parametrize(
"array",
[
np.arange(5),
np.array(["a", "b", "c"]),
date_range("2000-01-01", periods=3).values,
],
)
def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike:
def __init__(self, array):
self.array = array
def __array__(self, dtype=None) -> np.ndarray:
return self.array
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
expected = Float64Index(data)
result = Index(data, dtype="float")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["int64", "uint64"])
def test_constructor_int_dtype_nan_raises(self, dtype):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with pytest.raises(ValueError, match=msg):
Index(data, dtype=dtype)
def test_constructor_no_pandas_array(self):
ser = pd.Series([1, 2, 3])
result = pd.Index(ser.array)
expected = pd.Index([1, 2, 3])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"klass,dtype,na_val",
[
(pd.Float64Index, np.float64, np.nan),
(pd.DatetimeIndex, "datetime64[ns]", pd.NaT),
],
)
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
expected = klass(na_list)
assert expected.dtype == dtype
result = Index(na_list)
tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"vals,dtype",
[
([1, 2, 3, 4, 5], "int"),
([1.1, np.nan, 2.2, 3.0], "float"),
(["A", "B", "C", np.nan], "obj"),
],
)
def test_constructor_simple_new(self, vals, dtype):
index = Index(vals, name=dtype)
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize(
"vals",
[
[1, 2, 3],
np.array([1, 2, 3]),
np.array([1, 2, 3], dtype=int),
# below should coerce
[1.0, 2.0, 3.0],
np.array([1.0, 2.0, 3.0], dtype=float),
],
)
def test_constructor_dtypes_to_int64(self, vals):
index = Index(vals, dtype=int)
assert isinstance(index, Int64Index)
@pytest.mark.parametrize(
"vals",
[
[1, 2, 3],
[1.0, 2.0, 3.0],
np.array([1.0, 2.0, 3.0]),
np.array([1, 2, 3], dtype=int),
np.array([1.0, 2.0, 3.0], dtype=float),
],
)
def test_constructor_dtypes_to_float64(self, vals):
index = Index(vals, dtype=float)
assert isinstance(index, Float64Index)
@pytest.mark.parametrize(
"vals",
[
[1, 2, 3],
np.array([1, 2, 3], dtype=int),
np.array(
[np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")]
),
[datetime(2011, 1, 1), datetime(2011, 1, 2)],
],
)
def test_constructor_dtypes_to_categorical(self, vals):
index = Index(vals, dtype="category")
assert isinstance(index, CategoricalIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize(
"vals",
[
Index(
np.array(
[
np_datetime64_compat("2011-01-01"),
np_datetime64_compat("2011-01-02"),
]
)
),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]),
],
)
def test_constructor_dtypes_to_datetime(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, DatetimeIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize(
"vals",
[
np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]),
[timedelta(1), timedelta(1)],
],
)
def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("attr", ["values", "asi8"])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass):
# Test constructing with a datetimetz dtype
# .values produces numpy datetimes, so these are considered naive
# .asi8 produces integers, so these are considered epoch timestamps
# ^the above will be true in a later version. Right now we `.view`
# the i8 values as NS_DTYPE, effectively treating them as wall times.
index = pd.date_range("2011-01-01", periods=5)
arg = getattr(index, attr)
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
if attr == "asi8":
result = pd.DatetimeIndex(arg).tz_localize(tz_naive_fixture)
else:
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
if attr == "asi8":
result = pd.DatetimeIndex(arg).astype(dtype)
else:
result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
if attr == "asi8":
result = pd.DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture)
else:
result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
if attr == "asi8":
result = pd.DatetimeIndex(list(arg)).astype(dtype)
else:
result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ["values", "asi8"])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
index = pd.timedelta_range("1 days", periods=5)
index = index._with_freq(None) # wont be preserved by constructors
dtype = index.dtype
values = getattr(index, attr)
result = klass(values, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(values), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("value", [[], iter([]), (_ for _ in [])])
@pytest.mark.parametrize(
"klass",
[
Index,
Float64Index,
Int64Index,
UInt64Index,
CategoricalIndex,
DatetimeIndex,
TimedeltaIndex,
],
)
def test_constructor_empty(self, value, klass):
empty = klass(value)
assert isinstance(empty, klass)
assert not len(empty)
@pytest.mark.parametrize(
"empty,klass",
[
(PeriodIndex([], freq="B"), PeriodIndex),
(PeriodIndex(iter([]), freq="B"), PeriodIndex),
(PeriodIndex((_ for _ in []), freq="B"), PeriodIndex),
(RangeIndex(step=1), pd.RangeIndex),
(MultiIndex(levels=[[1, 2], ["blue", "red"]], codes=[[], []]), MultiIndex),
],
)
def test_constructor_empty_special(self, empty, klass):
assert isinstance(empty, klass)
assert not len(empty)
def test_constructor_overflow_int64(self):
# see gh-15832
msg = (
"The elements provided in the data cannot "
"all be casted to the dtype int64"
)
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
@pytest.mark.parametrize(
"index",
[
"datetime",
"float",
"int",
"period",
"range",
"repeats",
"timedelta",
"tuples",
"uint",
],
indirect=True,
)
def test_view_with_args(self, index):
index.view("i8")
@pytest.mark.parametrize(
"index",
[
"unicode",
"string",
pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")),
"bool",
"empty",
],
indirect=True,
)
def test_view_with_args_object_array_raises(self, index):
msg = "Cannot change data-type for object array"
with pytest.raises(TypeError, match=msg):
index.view("i8")
@pytest.mark.parametrize("index", ["int", "range"], indirect=True)
def test_astype(self, index):
casted = index.astype("i8")
# it works!
casted.get_loc(5)
# pass on name
index.name = "foobar"
casted = index.astype("i8")
assert casted.name == "foobar"
def test_equals_object(self):
# same
assert Index(["a", "b", "c"]).equals(Index(["a", "b", "c"]))
@pytest.mark.parametrize(
"comp", [Index(["a", "b"]), Index(["a", "b", "d"]), ["a", "b", "c"]]
)
def test_not_equals_object(self, comp):
assert not Index(["a", "b", "c"]).equals(comp)
def test_insert_missing(self, nulls_fixture):
# GH 22295
# test there is no mangling of NA values
expected = Index(["a", nulls_fixture, "b", "c"])
result = Index(list("abc")).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
def test_delete_raises(self):
index = Index(["a", "b", "c", "d"], name="index")
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(IndexError, match=msg):
index.delete(5)
def test_identical(self):
# index
i1 = Index(["a", "b", "c"])
i2 = Index(["a", "b", "c"])
assert i1.identical(i2)
i1 = i1.rename("foo")
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename("foo")
assert i1.identical(i2)
i3 = Index([("a", "a"), ("a", "b"), ("b", "a")])
i4 = Index([("a", "a"), ("a", "b"), ("b", "a")], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = "bob"
assert ind.is_(ind2)
assert ind2.is_(ind)
# doesn't matter if Indices are *actually* views of underlying data,
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
@pytest.mark.parametrize("index", ["datetime"], indirect=True)
def test_asof(self, index):
d = index[0]
assert index.asof(d) == d
assert isna(index.asof(d - timedelta(1)))
d = index[-1]
assert index.asof(d + timedelta(1)) == d
d = index[0].to_pydatetime()
assert isinstance(index.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range("2010-01-01", periods=2, freq="m")
expected = Timestamp("2010-02-28")
result = index.asof("2010-02")
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp("20130101")]).values.view("i8")[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns")
assert first_value == x[Timestamp(expected_ts)]
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_booleanindex(self, index):
bool_index = np.ones(len(index), dtype=bool)
bool_index[5:30:2] = False
sub_index = index[bool_index]
for i, val in enumerate(sub_index):
assert sub_index.get_loc(val) == i
sub_index = index[list(bool_index)]
for i, val in enumerate(sub_index):
assert sub_index.get_loc(val) == i
def test_fancy(self):
index = self.create_index()
sl = index[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, index, dtype):
empty_arr = np.array([], dtype=dtype)
empty_index = type(index)([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
def test_empty_fancy_raises(self, index):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
empty_index = type(index)([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
msg = r"arrays used as indices must be of integer \(or boolean\) type"
with pytest.raises(IndexError, match=msg):
index[empty_farr]
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_intersection(self, index, sort):
first = index[:20]
second = index[:10]
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize(
"index2,keeps_name",
[
(Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
(Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
(Index([3, 4, 5, 6, 7]), False),
],
)
def test_intersection_name_preservation(self, index2, keeps_name, sort):
index1 = Index([1, 2, 3, 4, 5], name="index")
expected = Index([3, 4, 5])
result = index1.intersection(index2, sort)
if keeps_name:
expected.name = "index"
assert result.name == expected.name
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", ["string"], indirect=True)
@pytest.mark.parametrize(
"first_name,second_name,expected_name",
[("A", "A", "A"), ("A", "B", None), (None, "B", None)],
)
def test_intersection_name_preservation2(
self, index, first_name, second_name, expected_name, sort
):
first = index[5:20]
second = index[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second, sort=sort)
assert intersect.name == expected_name
@pytest.mark.parametrize(
"index2,keeps_name",
[
(Index([4, 7, 6, 5, 3], name="index"), True),
(Index([4, 7, 6, 5, 3], name="other"), False),
],
)
def test_intersection_monotonic(self, index2, keeps_name, sort):
index1 = Index([5, 3, 2, 4, 1], name="index")
expected = Index([5, 3, 4])
if keeps_name:
expected.name = "index"
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"index2,expected_arr",
[(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])],
)
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort):
# non-monotonic non-unique
index1 = Index(["A", "B", "A", "C"])
expected = Index(expected_arr, dtype="object")
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
def test_intersect_str_dates(self, sort):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(["aa"], dtype=object)
result = i2.intersection(i1, sort=sort)
assert len(result) == 0
@pytest.mark.xfail(reason="Not implemented")
def test_intersection_equal_sort_true(self):
# TODO decide on True behaviour
idx = pd.Index(["c", "a", "b"])
sorted_ = pd.Index(["a", "b", "c"])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
def test_chained_union(self, sort):
        # Chained unions handle names correctly
i1 = Index([1, 2], name="i1")
i2 = Index([5, 6], name="i2")
i3 = Index([3, 4], name="i3")
union = i1.union(i2.union(i3, sort=sort), sort=sort)
expected = i1.union(i2, sort=sort).union(i3, sort=sort)
tm.assert_index_equal(union, expected)
j1 = Index([1, 2], name="j1")
j2 = Index([], name="j2")
j3 = Index([], name="j3")
union = j1.union(j2.union(j3, sort=sort), sort=sort)
expected = j1.union(j2, sort=sort).union(j3, sort=sort)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union(self, index, sort):
first = index[5:20]
second = index[:10]
everything = index[:20]
union = first.union(second, sort=sort)
if sort is None:
tm.assert_index_equal(union, everything.sort_values())
assert tm.equalContents(union, everything)
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_other_special(self, slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_special_true(self, slice_):
# TODO decide on True behaviour
# sort=True
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
result = idx.union(other, sort=True)
expected = pd.Index([0, 1, 2])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [np.array, Series, list])
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union_from_iterables(self, index, klass, sort):
# GH 10149
first = index[5:20]
second = index[:10]
everything = index[:20]
case = klass(second.values)
result = first.union(case, sort=sort)
if sort is None:
tm.assert_index_equal(result, everything.sort_values())
assert tm.equalContents(result, everything)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union_identity(self, index, sort):
first = index[5:20]
union = first.union(first, sort=sort)
# i.e. identity is not preserved when sort is True
assert (union is first) is (not sort)
# This should no longer be the same object, since [] is not consistent,
# both objects will be recast to dtype('O')
union = first.union([], sort=sort)
assert (union is first) is (not sort)
union = Index([]).union(first, sort=sort)
assert (union is first) is (not sort)
@pytest.mark.parametrize("first_list", [list("ba"), list()])
@pytest.mark.parametrize("second_list", [list("ab"), list()])
@pytest.mark.parametrize(
"first_name, second_name, expected_name",
[("A", "B", None), (None, "B", None), ("A", None, None)],
)
def test_union_name_preservation(
self, first_list, second_list, first_name, second_name, expected_name, sort
):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second, sort=sort)
vals = set(first_list).union(second_list)
if sort is None and len(first_list) > 0 and len(second_list) > 0:
expected = Index(sorted(vals), name=expected_name)
tm.assert_index_equal(union, expected)
else:
expected = Index(vals, name=expected_name)
assert tm.equalContents(union, expected)
def test_union_dt_as_obj(self, sort):
        # TODO: Replace with fixture
index = self.create_index()
date_index = pd.date_range("2019-01-01", periods=10)
first_cat = index.union(date_index)
second_cat = index.union(index)
if date_index.dtype == np.object_:
appended = np.append(index, date_index)
else:
appended = np.append(index, date_index.astype("O"))
assert tm.equalContents(first_cat, appended)
assert tm.equalContents(second_cat, index)
tm.assert_contains_all(index, first_cat)
tm.assert_contains_all(index, second_cat)
tm.assert_contains_all(date_index, first_cat)
def test_map_identity_mapping(self, index):
# GH 12766
tm.assert_index_equal(index, index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
result = index.map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ["foo", "bar", "baz"]
multi_index = MultiIndex.from_tuples(zip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize(
"attr", ["makeDateIndex", "makePeriodIndex", "makeTimedeltaIndex"]
)
def test_map_tseries_indices_return_index(self, attr):
index = getattr(tm, attr)(10)
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq="h", name="hourly")
expected = Index(range(24), name="hourly")
tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index),
],
)
def test_map_dictlike_simple(self, mapper):
# GH 12756
expected = Index(["foo", "bar", "baz"])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index),
],
)
def test_map_dictlike(self, index, mapper):
# GH 12756
if isinstance(index, CategoricalIndex):
# Tested in test_categorical
return
elif not index.is_unique:
# Cannot map duplicated index
return
if index.empty:
# to match proper result coercion for uints
expected = Index([])
else:
expected = Index(np.arange(len(index), 0, -1))
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[Series(["foo", 2.0, "baz"], index=[0, 2, -1]), {0: "foo", 2: 2.0, -1: "baz"}],
)
def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2.0, np.nan, "foo"])
result = Index([2, 1, 0]).map(mapper)
tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
index = Index([1.5, np.nan, 3, np.nan, 5])
result = index.map(lambda x: x * 2, na_action="ignore")
expected = index * 2
tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
index = Index([1, 2, 3])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = index.map(default_dict)
expected = Index(["stuff", "blank", "blank"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("name,expected", [("foo", "foo"), ("bar", None)])
def test_append_empty_preserve_name(self, name, expected):
left = Index([], name="foo")
right = Index([1, 2, 3], name=name)
result = left.append(right)
assert result.name == expected
@pytest.mark.parametrize("index", ["string"], indirect=True)
@pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")])
def test_difference_name_preservation(self, index, second_name, expected, sort):
first = index[5:20]
second = index[:10]
answer = index[10:20]
first.name = "name"
second.name = second_name
result = first.difference(second, sort=sort)
assert tm.equalContents(result, answer)
if expected is None:
assert result.name is None
else:
assert result.name == expected
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_difference_empty_arg(self, index, sort):
first = index[5:20]
first.name = "name"
result = first.difference([], sort)
assert tm.equalContents(result, first)
assert result.name == first.name
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_difference_identity(self, index, sort):
first = index[5:20]
first.name = "name"
result = first.difference(first, sort)
assert len(result) == 0
assert result.name == first.name
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_difference_sort(self, index, sort):
first = index[5:20]
second = index[:10]
result = first.difference(second, sort)
expected = index[10:20]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self, sort):
# smoke
index1 = Index([5, 2, 3, 4], name="index1")
index2 = Index([2, 3, 4, 1])
result = index1.symmetric_difference(index2, sort=sort)
expected = Index([5, 1])
assert tm.equalContents(result, expected)
assert result.name is None
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# __xor__ syntax
expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
@pytest.mark.parametrize("opname", ["difference", "symmetric_difference"])
def test_difference_incomparable(self, opname):
a = pd.Index([3, pd.Timestamp("2000"), 1])
b = pd.Index([2, pd.Timestamp("1999"), 1])
op = operator.methodcaller(opname, b)
# sort=None, the default
result = op(a)
expected = pd.Index([3, pd.Timestamp("2000"), 2, pd.Timestamp("1999")])
if opname == "difference":
expected = expected[:2]
tm.assert_index_equal(result, expected)
# sort=False
op = operator.methodcaller(opname, b, sort=False)
result = op(a)
tm.assert_index_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize("opname", ["difference", "symmetric_difference"])
def test_difference_incomparable_true(self, opname):
# TODO decide on True behaviour
        # sort=True, raises
a = pd.Index([3, pd.Timestamp("2000"), 1])
b = pd.Index([2, pd.Timestamp("1999"), 1])
op = operator.methodcaller(opname, b, sort=True)
with pytest.raises(TypeError, match="Cannot compare"):
op(a)
def test_symmetric_difference_mi(self, sort):
index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3]))
index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)])
result = index1.symmetric_difference(index2, sort=sort)
expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)])
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
@pytest.mark.parametrize(
"index2,expected",
[
(Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])),
(Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])),
],
)
def test_symmetric_difference_missing(self, index2, expected, sort):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
index1 = Index([1, np.nan, 2, 3])
result = index1.symmetric_difference(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
def test_symmetric_difference_non_index(self, sort):
index1 = Index([1, 2, 3, 4], name="index1")
index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = index1.symmetric_difference(index2, sort=sort)
assert tm.equalContents(result, expected)
assert result.name == "index1"
result = index1.symmetric_difference(index2, result_name="new_name", sort=sort)
assert tm.equalContents(result, expected)
assert result.name == "new_name"
def test_difference_type(self, index, sort):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
if not index.is_unique:
return
result = index.difference(index, sort=sort)
expected = index.drop(index)
tm.assert_index_equal(result, expected)
def test_intersection_difference(self, index, sort):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
if not index.is_unique:
return
inter = index.intersection(index.drop(index))
diff = index.difference(index, sort=sort)
tm.assert_index_equal(inter, diff)
def test_is_mixed_deprecated(self):
# GH#32922
index = self.create_index()
with tm.assert_produces_warning(FutureWarning):
index.is_mixed()
@pytest.mark.parametrize(
"index, expected",
[
("string", False),
("bool", False),
("categorical", False),
("int", True),
("datetime", False),
("float", True),
],
indirect=["index"],
)
def test_is_numeric(self, index, expected):
assert index.is_numeric() is expected
@pytest.mark.parametrize(
"index, expected",
[
("string", True),
("bool", True),
("categorical", False),
("int", False),
("datetime", False),
("float", False),
],
indirect=["index"],
)
def test_is_object(self, index, expected):
assert index.is_object() is expected
@pytest.mark.parametrize(
"index, expected",
[
("string", False),
("bool", False),
("categorical", False),
("int", False),
("datetime", True),
("float", False),
],
indirect=["index"],
)
def test_is_all_dates(self, index, expected):
assert index.is_all_dates is expected
def test_summary(self, index):
self._check_method_works(Index._summary, index)
def test_summary_bug(self):
        # GH3869
ind = Index(["{other}%s", "~:{range}:0"], name="A")
result = ind._summary()
# shouldn't be formatted accidentally.
assert "~:{range}:0" in result
assert "{other}%s" in result
def test_format_different_scalar_lengths(self):
# GH35439
idx = Index(["aaaaaaaaa", "b"])
expected = ["aaaaaaaaa", "b"]
assert idx.format() == expected
def test_format_bug(self):
# GH 14626
        # Windows has different precision on datetime.datetime.now (it doesn't
        # always include microseconds); Timestamp's default repr shows them, but
        # Index formatting does not, so we skip that case.
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
Index([]).format()
@pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]])
def test_format_missing(self, vals, nulls_fixture):
# 2845
vals = list(vals) # Copy for each iteration
vals.append(nulls_fixture)
index = Index(vals)
formatted = index.format()
expected = [str(index[0]), str(index[1]), str(index[2]), "NaN"]
assert formatted == expected
assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
dates = date_range("2011-01-01 04:00:00", periods=10, name="something")
formatted = dates.format(name=True)
assert formatted[0] == "something"
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"]
assert len(result) == 2
assert result == expected
@pytest.mark.parametrize("op", ["any", "all"])
def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method, index):
method(index)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
tm.assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize(
"expected,method",
[
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), "pad"),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), "ffill"),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), "backfill"),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), "bfill"),
],
)
def test_get_indexer_methods(self, reverse, expected, method):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
tm.assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with pytest.raises(ValueError, match="tolerance argument"):
index.get_indexer([1, 0], tolerance=1)
with pytest.raises(ValueError, match="limit argument"):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
"method, tolerance, indexer, expected",
[
("pad", None, [0, 5, 9], [0, 5, 9]),
("backfill", None, [0, 5, 9], [0, 5, 9]),
("nearest", None, [0, 5, 9], [0, 5, 9]),
("pad", 0, [0, 5, 9], [0, 5, 9]),
("backfill", 0, [0, 5, 9], [0, 5, 9]),
("nearest", 0, [0, 5, 9], [0, 5, 9]),
("pad", None, [0.2, 1.8, 8.5], [0, 1, 8]),
("backfill", None, [0.2, 1.8, 8.5], [1, 2, 9]),
("nearest", None, [0.2, 1.8, 8.5], [0, 2, 9]),
("pad", 1, [0.2, 1.8, 8.5], [0, 1, 8]),
("backfill", 1, [0.2, 1.8, 8.5], [1, 2, 9]),
("nearest", 1, [0.2, 1.8, 8.5], [0, 2, 9]),
("pad", 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
("backfill", 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
("nearest", 0.2, [0.2, 1.8, 8.5], [0, 2, -1]),
],
)
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("listtype", [list, tuple, Series, np.array])
@pytest.mark.parametrize(
"tolerance, expected",
list(
zip(
[[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1], [-1, 2, 9]],
)
),
)
def test_get_indexer_nearest_listlike_tolerance(
self, tolerance, expected, listtype
):
index = Index(np.arange(10))
actual = index.get_indexer(
[0.2, 1.8, 8.5], method="nearest", tolerance=listtype(tolerance)
)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with pytest.raises(ValueError, match="limit argument"):
index.get_indexer([1, 0], method="nearest", limit=1)
with pytest.raises(ValueError, match="tolerance size must match"):
index.get_indexer([1, 0], method="nearest", tolerance=[1, 2, 3])
@pytest.mark.parametrize(
"method,expected",
[("pad", [8, 7, 0]), ("backfill", [9, 8, 1]), ("nearest", [9, 7, 0])],
)
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize(
"method,expected",
[
("pad", np.array([-1, 0, 1, 1], dtype=np.intp)),
("backfill", np.array([0, 0, 1, -1], dtype=np.intp)),
],
)
def test_get_indexer_strings(self, method, expected):
index = pd.Index(["b", "c"])
actual = index.get_indexer(["a", "b", "c", "d"], method=method)
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings_raises(self):
index = pd.Index(["b", "c"])
msg = r"unsupported operand type\(s\) for -: 'str' and 'str'"
with pytest.raises(TypeError, match=msg):
index.get_indexer(["a", "b", "c", "d"], method="nearest")
with pytest.raises(TypeError, match=msg):
index.get_indexer(["a", "b", "c", "d"], method="pad", tolerance=2)
with pytest.raises(TypeError, match=msg):
index.get_indexer(
["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2]
)
@pytest.mark.parametrize("idx_class", [Int64Index, RangeIndex, Float64Index])
def test_get_indexer_numeric_index_boolean_target(self, idx_class):
# GH 16877
numeric_index = idx_class(RangeIndex(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_with_NA_values(
self, unique_nulls_fixture, unique_nulls_fixture2
):
# GH 22332
# check pairwise, that no pair of na values
# is mangled
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values are not unique
arr = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object)
index = pd.Index(arr, dtype=object)
result = index.get_indexer(
[unique_nulls_fixture, unique_nulls_fixture2, "Unknown"]
)
expected = np.array([0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
def test_get_loc(self, method):
index = pd.Index([0, 1, 2])
assert index.get_loc(1, method=method) == 1
if method:
assert index.get_loc(1, method=method, tolerance=0) == 1
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
def test_get_loc_raises_bad_label(self, method):
index = pd.Index([0, 1, 2])
if method:
msg = "not supported between"
else:
msg = "invalid key"
with pytest.raises(TypeError, match=msg):
index.get_loc([1, 2], method=method)
@pytest.mark.parametrize(
"method,loc", [("pad", 1), ("backfill", 2), ("nearest", 1)]
)
def test_get_loc_tolerance(self, method, loc):
index = pd.Index([0, 1, 2])
assert index.get_loc(1.1, method) == loc
assert index.get_loc(1.1, method, tolerance=1) == loc
@pytest.mark.parametrize("method", ["pad", "backfill", "nearest"])
def test_get_loc_outside_tolerance_raises(self, method):
index = pd.Index([0, 1, 2])
with pytest.raises(KeyError, match="1.1"):
index.get_loc(1.1, method, tolerance=0.05)
def test_get_loc_bad_tolerance_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match="must be numeric"):
index.get_loc(1.1, "nearest", tolerance="invalid")
def test_get_loc_tolerance_no_method_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match="tolerance .* valid if"):
index.get_loc(1.1, tolerance=1)
def test_get_loc_raises_missized_tolerance(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match="tolerance size must match"):
index.get_loc(1.1, "nearest", tolerance=[1, 1])
def test_get_loc_raises_object_nearest(self):
index = pd.Index(["a", "c"])
with pytest.raises(TypeError, match="unsupported operand type"):
index.get_loc("a", method="nearest")
def test_get_loc_raises_object_tolerance(self):
index = pd.Index(["a", "c"])
with pytest.raises(TypeError, match="unsupported operand type"):
index.get_loc("a", method="pad", tolerance="invalid")
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_float_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
def test_slice_locs_dup(self):
index = Index(["a", "a", "b", "c", "d", "d"])
assert index.slice_locs("a", "d") == (0, 6)
assert index.slice_locs(end="d") == (0, 6)
assert index.slice_locs("a", "c") == (0, 4)
assert index.slice_locs("b", "d") == (2, 6)
index2 = index[::-1]
assert index2.slice_locs("d", "a") == (0, 6)
assert index2.slice_locs(end="a") == (0, 6)
assert index2.slice_locs("d", "b") == (0, 4)
assert index2.slice_locs("c", "a") == (2, 6)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with pytest.raises(KeyError, match=""):
index.slice_locs(start=1.5)
with pytest.raises(KeyError, match=""):
index.slice_locs(end=1.5)
@pytest.mark.parametrize(
"in_slice,expected",
[
# error: Slice index must be an integer or None
(pd.IndexSlice[::-1], "yxdcb"),
(pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc]
(pd.IndexSlice["b"::-1], "b"), # type: ignore[misc]
(pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice[:"y":-1], "y"), # type: ignore[misc]
(pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc]
# absent labels
(pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc]
(pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc]
(pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc]
(pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc]
(pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc]
(pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc]
(pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc]
],
)
def test_slice_locs_negative_step(self, in_slice, expected):
index = Index(list("bcdxy"))
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step)
result = index[s_start : s_stop : in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
def test_drop_by_str_label(self, index):
n = len(index)
drop = index[list(range(5, 10))]
dropped = index.drop(drop)
expected = index[list(range(5)) + list(range(10, n))]
tm.assert_index_equal(dropped, expected)
dropped = index.drop(index[0])
expected = index[1:]
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
@pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]])
def test_drop_by_str_label_raises_missing_keys(self, index, keys):
with pytest.raises(KeyError, match=""):
index.drop(keys)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
def test_drop_by_str_label_errors_ignore(self, index):
n = len(index)
drop = index[list(range(5, 10))]
mixed = drop.tolist() + ["foo"]
dropped = index.drop(mixed, errors="ignore")
expected = index[list(range(5)) + list(range(10, n))]
tm.assert_index_equal(dropped, expected)
dropped = index.drop(["foo", "bar"], errors="ignore")
expected = index[list(range(n))]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
# TODO: Parametrize numeric and str tests after self.strIndex fixture
index = Index([1, 2, 3])
dropped = index.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_raises_missing_keys(self):
index = Index([1, 2, 3])
with pytest.raises(KeyError, match=""):
index.drop([3, 4])
@pytest.mark.parametrize(
"key,expected", [(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))]
)
def test_drop_by_numeric_label_errors_ignore(self, key, expected):
index = Index([1, 2, 3])
dropped = index.drop(key, errors="ignore")
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize(
"values",
[["a", "b", ("c", "d")], ["a", ("c", "d"), "b"], [("c", "d"), "a", "b"]],
)
@pytest.mark.parametrize("to_drop", [[("c", "d"), "a"], ["a", ("c", "d")]])
def test_drop_tuple(self, values, to_drop):
# GH 18304
index = pd.Index(values)
expected = pd.Index(["b"])
result = index.drop(to_drop)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[0])
for drop_me in to_drop[1], [to_drop[1]]:
result = removed.drop(drop_me)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
msg = fr"\"\[{re.escape(to_drop[1].__repr__())}\] not found in axis\""
for drop_me in to_drop[1], [to_drop[1]]:
with pytest.raises(KeyError, match=msg):
removed.drop(drop_me)
@pytest.mark.parametrize(
"method,expected,sort",
[
(
"intersection",
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
),
False,
),
(
"intersection",
np.array(
[(1, "A"), (1, "B"), (2, "A"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
),
None,
),
(
"union",
np.array(
[(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")],
dtype=[("num", int), ("let", "a1")],
),
None,
),
],
)
def test_tuple_union_bug(self, method, expected, sort):
index1 = Index(
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
)
)
index2 = Index(
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")],
dtype=[("num", int), ("let", "a1")],
)
)
result = getattr(index1, method)(index2, sort=sort)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"attr",
[
"is_monotonic_increasing",
"is_monotonic_decreasing",
"_is_strictly_monotonic_increasing",
"_is_strictly_monotonic_decreasing",
],
)
def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
def test_set_value_deprecated(self):
# GH 28621
idx = self.create_index()
arr = np.array([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
idx.set_value(arr, idx[1], 80)
assert arr[1] == 80
@pytest.mark.parametrize(
"index", ["string", "int", "datetime", "timedelta"], indirect=True
)
def test_get_value(self, index):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
value = index[67]
with pytest.raises(AttributeError, match="has no attribute '_values'"):
# Index.get_value requires a Series, not an ndarray
with tm.assert_produces_warning(FutureWarning):
index.get_value(values, value)
with tm.assert_produces_warning(FutureWarning):
result = index.get_value(Series(values, index=values), value)
tm.assert_almost_equal(result, values[67])
@pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}])
@pytest.mark.parametrize(
"index,expected",
[
(Index(["qux", "baz", "foo", "bar"]), np.array([False, False, True, True])),
(Index([]), np.array([], dtype=bool)), # empty
],
)
def test_isin(self, values, index, expected):
result = index.isin(values)
tm.assert_numpy_array_equal(result, expected)
def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
# all nans are the same
if (
isinstance(nulls_fixture, float)
and isinstance(nulls_fixture2, float)
and math.isnan(nulls_fixture)
and math.isnan(nulls_fixture2)
):
tm.assert_numpy_array_equal(
Index(["a", nulls_fixture]).isin([nulls_fixture2]),
np.array([False, True]),
)
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(
Index(["a", nulls_fixture]).isin([nulls_fixture2]),
np.array([False, True]),
)
else:
tm.assert_numpy_array_equal(
Index(["a", nulls_fixture]).isin([nulls_fixture2]),
np.array([False, False]),
)
def test_isin_nan_common_float64(self, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
if nulls_fixture is pd.NA:
pytest.xfail("Float64Index cannot contain pd.NA")
tm.assert_numpy_array_equal(
Float64Index([1.0, nulls_fixture]).isin([np.nan]), np.array([False, True])
)
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(
Float64Index([1.0, nulls_fixture]).isin([pd.NaT]), np.array([False, False])
)
@pytest.mark.parametrize("level", [0, -1])
@pytest.mark.parametrize(
"index",
[
Index(["qux", "baz", "foo", "bar"]),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0]),
],
)
def test_isin_level_kwarg(self, level, index):
values = index.tolist()[-2:] + ["nonexisting"]
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
index.name = "foobar"
tm.assert_numpy_array_equal(expected, index.isin(values, level="foobar"))
def test_isin_level_kwarg_bad_level_raises(self, index):
for level in [10, index.nlevels, -(index.nlevels + 1)]:
with pytest.raises(IndexError, match="Too many levels"):
index.isin([], level=level)
@pytest.mark.parametrize("label", [1.0, "foobar", "xyzzy", np.nan])
def test_isin_level_kwarg_bad_label_raises(self, label, index):
if isinstance(index, MultiIndex):
index = index.rename(["foo", "bar"] + index.names[2:])
msg = f"'Level {label} not found'"
else:
index = index.rename("foo")
msg = fr"Requested level \({label}\) does not match index name \(foo\)"
with pytest.raises(KeyError, match=msg):
index.isin([], level=label)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
index = Index(["a", "b"])
expected = np.array([False, False])
result = index.isin(empty)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize(
"values",
[
[1, 2, 3, 4],
[1.0, 2.0, 3.0, 4.0],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
pd.date_range("2018-01-01", freq="D", periods=4),
],
)
def test_boolean_cmp(self, values):
index = Index(values)
result = index == values
expected = np.array([True, True, True, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", ["string"], indirect=True)
@pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")])
def test_get_level_values(self, index, name, level):
expected = index.copy()
if name:
expected.name = name
result = expected.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_slice_keep_name(self):
index = Index(["a", "b"], name="asdf")
assert index.name == index[1:].name
@pytest.mark.parametrize(
"index",
["unicode", "string", "datetime", "int", "uint", "float"],
indirect=True,
)
def test_join_self(self, index, join_type):
joined = index.join(index, how=join_type)
assert index is joined
@pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"])
def test_str_attribute(self, method):
# GH9068
index = Index([" jack", "jill ", " jesse ", "frank"])
expected = Index([getattr(str, method)(x) for x in index.values])
result = getattr(index.str, method)()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
Index(range(5)),
tm.makeDateIndex(10),
MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]),
period_range(start="2000", end="2010", freq="A"),
],
)
def test_str_attribute_raises(self, index):
with pytest.raises(AttributeError, match="only use .str accessor"):
index.str.repeat(2)
@pytest.mark.parametrize(
"expand,expected",
[
(None, Index([["a", "b", "c"], ["d", "e"], ["f"]])),
(False, Index([["a", "b", "c"], ["d", "e"], ["f"]])),
(
True,
MultiIndex.from_tuples(
[("a", "b", "c"), ("d", "e", np.nan), ("f", np.nan, np.nan)]
),
),
],
)
def test_str_split(self, expand, expected):
index = Index(["a b c", "d e", "f"])
if expand is not None:
result = index.str.split(expand=expand)
else:
result = index.str.split()
tm.assert_index_equal(result, expected)
def test_str_bool_return(self):
# test boolean case, should return np.array instead of boolean Index
index = Index(["a1", "a2", "b1", "b2"])
result = index.str.startswith("a")
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
def test_str_bool_series_indexing(self):
index = Index(["a1", "a2", "b1", "b2"])
s = Series(range(4), index=index)
result = s[s.index.str.startswith("a")]
expected = Series(range(2), index=["a1", "a2"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index,expected", [(Index(list("abcd")), True), (Index(range(4)), False)]
)
def test_tab_completion(self, index, expected):
# GH 9910
result = "str" in dir(index)
assert result == expected
def test_indexing_doesnt_change_class(self):
index = Index([1, 2, 3, "a", "b", "c"])
assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_index = Index(np.random.permutation(15))
right_index = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how="outer")
# right_index in this case because DatetimeIndex has join precedence
# over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_index.astype(object).union(left_index.astype(object))
tm.assert_index_equal(result, expected)
def test_nan_first_take_datetime(self):
index = Index([pd.NaT, Timestamp("20130101"), Timestamp("20130102")])
result = index.take([-1, 0, 1])
expected = Index([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
def test_take_fill_value(self):
# GH 12631
index = pd.Index(list("ABC"), name="xxx")
result = index.take(np.array([1, 0, -1]))
expected = pd.Index(list("BAC"), name="xxx")
tm.assert_index_equal(result, expected)
# fill_value
result = index.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(["B", "A", np.nan], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = index.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.Index(["B", "A", "C"], name="xxx")
tm.assert_index_equal(result, expected)
def test_take_fill_value_none_raises(self):
index = pd.Index(list("ABC"), name="xxx")
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -5]), fill_value=True)
def test_take_bad_bounds_raises(self):
index = pd.Index(list("ABC"), name="xxx")
with pytest.raises(IndexError, match="out of bounds"):
index.take(np.array([1, -5]))
@pytest.mark.parametrize("name", [None, "foobar"])
@pytest.mark.parametrize(
"labels",
[
[],
np.array([]),
["A", "B", "C"],
["C", "B", "A"],
np.array(["A", "B", "C"]),
np.array(["C", "B", "A"]),
# Must preserve name even if dtype changes
pd.date_range("20130101", periods=3).values,
pd.date_range("20130101", periods=3).tolist(),
],
)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name, labels):
# GH6552
index = pd.Index([0, 1, 2])
index.name = name
assert index.reindex(labels)[0].name == name
@pytest.mark.parametrize("labels", [[], np.array([]), np.array([], dtype=np.int64)])
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self, labels):
# GH7774
index = pd.Index(list("abc"))
assert index.reindex(labels)[0].dtype.type == np.object_
@pytest.mark.parametrize(
"labels,dtype",
[
(pd.Int64Index([]), np.int64),
(pd.Float64Index([]), np.float64),
(pd.DatetimeIndex([]), np.datetime64),
],
)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dtype):
# GH7774
index = pd.Index(list("abc"))
assert index.reindex(labels)[0].dtype.type == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
index = pd.Index(list("abc"))
result = index.reindex(
pd.MultiIndex([pd.Int64Index([]), pd.Float64Index([])], [[], []])
)[0]
assert result.levels[0].dtype.type == np.int64
assert result.levels[1].dtype.type == np.float64
def test_groupby(self):
index = Index(range(5))
result = index.groupby(np.array([1, 1, 2, 2, 2]))
expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize(
"mi,expected",
[
(MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
(MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False])),
],
)
def test_equals_op_multiindex(self, mi, expected):
# GH9785
# test comparisons of multiindex
df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
result = df.index == mi
tm.assert_numpy_array_equal(result, expected)
def test_equals_op_multiindex_identify(self):
df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
result = df.index == df.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
Index(["foo", "bar", "baz"]),
],
)
def test_equals_op_mismatched_multiindex_raises(self, index):
df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
with pytest.raises(ValueError, match="Lengths must match"):
df.index == index
def test_equals_op_index_vs_mi_same_length(self):
mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
index = Index(["foo", "bar", "baz"])
result = mi == index
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_conv", [pd.to_datetime, pd.to_timedelta])
def test_dt_conversion_preserves_name(self, dt_conv):
# GH 10875
index = pd.Index(["01:02:03", "01:02:04"], name="label")
assert index.name == dt_conv(index).name
@pytest.mark.parametrize(
"index,expected",
[
# ASCII
# short
(
pd.Index(["a", "bb", "ccc"]),
"""Index(['a', 'bb', 'ccc'], dtype='object')""",
),
# multiple lines
(
pd.Index(["a", "bb", "ccc"] * 10),
"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')""",
),
# truncated
(
pd.Index(["a", "bb", "ccc"] * 100),
"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)""",
),
# Non-ASCII
# short
(
pd.Index(["あ", "いい", "ううう"]),
"""Index(['あ', 'いい', 'ううう'], dtype='object')""",
),
# multiple lines
(
pd.Index(["あ", "いい", "ううう"] * 10),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう'],\n"
" dtype='object')"
),
),
# truncated
(
pd.Index(["あ", "いい", "ううう"] * 100),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ',\n"
" ...\n"
" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう'],\n"
" dtype='object', length=300)"
),
),
],
)
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.parametrize(
"index,expected",
[
# short
(
pd.Index(["あ", "いい", "ううう"]),
("Index(['あ', 'いい', 'ううう'], dtype='object')"),
),
# multiple lines
(
pd.Index(["あ", "いい", "ううう"] * 10),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう'],\n"
" dtype='object')"
""
),
),
# truncated
(
pd.Index(["あ", "いい", "ううう"] * 100),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ',\n"
" ...\n"
" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
"'いい', 'ううう', 'あ', 'いい',\n"
" 'ううう'],\n"
" dtype='object', length=300)"
),
),
],
)
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context("display.unicode.east_asian_width", True):
result = repr(index)
assert result == expected
def test_cached_properties_not_settable(self):
index = pd.Index([1, 2, 3])
with pytest.raises(AttributeError, match="Can't set attribute"):
index.is_unique = False
@async_mark()
async def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
await ip.run_code(code)
# GH 31324 newer jedi version raises Deprecation warning
import jedi
if jedi.__version__ < "0.16.0":
warning = tm.assert_produces_warning(None)
else:
warning = tm.assert_produces_warning(
DeprecationWarning, check_stacklevel=False
)
with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("idx.", 4))
def test_contains_method_removed(self, index):
# GH#30103 method removed for all types except IntervalIndex
if isinstance(index, pd.IntervalIndex):
index.contains(1)
else:
msg = f"'{type(index).__name__}' object has no attribute 'contains'"
with pytest.raises(AttributeError, match=msg):
index.contains(1)
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
    # in py2 and py3 because ints and strings are not comparable in py3
# (GH 13514)
_holder = Index
@pytest.fixture(params=[[0, "a", 1, "b", 2, "c"]], ids=["mixedIndex"])
def index(self, request):
return Index(request.param)
def create_index(self) -> Index:
return Index([0, "a", 1, "b", 2, "c"])
def test_argsort(self):
index = self.create_index()
with pytest.raises(TypeError, match="'>|<' not supported"):
index.argsort()
def test_numpy_argsort(self):
index = self.create_index()
with pytest.raises(TypeError, match="'>|<' not supported"):
np.argsort(index)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = self.create_index()
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = pd.Index([1, 2], name="MyName")
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name="NewName")
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == "MyName"
assert index2.name == "NewName"
index3 = index.copy(names=["NewName"])
tm.assert_index_equal(index, index3, check_names=False)
assert index.name == "MyName"
assert index.names == ["MyName"]
assert index3.name == "NewName"
assert index3.names == ["NewName"]
def test_unique_na(self):
idx = pd.Index([2, np.nan, 2, 1], name="my_index")
expected = pd.Index([2, np.nan, 1], name="my_index")
result = idx.unique()
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ["any", "all"])
@pytest.mark.parametrize("dtype", [None, object, "category"])
@pytest.mark.parametrize(
"vals,expected",
[
([1, 2, 3], [1, 2, 3]),
([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]),
([1.0, 2.0, np.nan, 3.0], [1.0, 2.0, 3.0]),
(["A", "B", "C"], ["A", "B", "C"]),
(["A", np.nan, "B", "C"], ["A", "B", "C"]),
],
)
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = pd.Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = pd.Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ["any", "all"])
@pytest.mark.parametrize(
"index,expected",
[
(
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
),
(
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]),
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
),
(
pd.TimedeltaIndex(["1 days", "2 days", "3 days"]),
pd.TimedeltaIndex(["1 days", "2 days", "3 days"]),
),
(
pd.TimedeltaIndex([pd.NaT, "1 days", "2 days", "3 days", pd.NaT]),
pd.TimedeltaIndex(["1 days", "2 days", "3 days"]),
),
(
pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
),
(
pd.PeriodIndex(["2012-02", "2012-04", "NaT", "2012-05"], freq="M"),
pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
),
],
)
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
pd.Index([1, 2, 3]).dropna(how="xxx")
def test_get_combined_index(self):
result = _get_combined_index([])
expected = Index([])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
pd.Index([np.nan]),
pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]),
pd.Index(["a", "b", np.nan]),
pd.to_datetime(["NaT"]),
pd.to_datetime(["NaT", "2000-01-01"]),
pd.to_datetime(["2000-01-01", "NaT", "2000-01-02"]),
pd.to_timedelta(["1 day", "NaT"]),
],
)
def test_is_monotonic_na(self, index):
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
def test_repr_summary(self):
with cf.option_context("display.max_seq_items", 10):
result = repr(pd.Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(["a", "b", "c"], name=0)
result = klass(list(range(3)), index=index)
assert "0" in repr(result)
def test_str_to_bytes_raises(self):
# GH 26447
index = Index([str(x) for x in range(10)])
msg = "^'str' object cannot be interpreted as an integer$"
with pytest.raises(TypeError, match=msg):
bytes(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
index1 = Index(dt_dates, dtype=object)
index2 = Index(["aa"], dtype=object)
result = index2.intersection(index1)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_index_repr_bool_nan(self):
# GH32146
arr = Index([True, False, np.nan], dtype=object)
exp1 = arr.format()
out1 = ["True", "False", "NaN"]
assert out1 == exp1
exp2 = repr(arr)
out2 = "Index([True, False, nan], dtype='object')"
assert out2 == exp2
@pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning")
def test_index_with_tuple_bool(self):
# GH34123
# TODO: remove tupleize_cols=False once correct behaviour is restored
# TODO: also this op right now produces FutureWarning from numpy
idx = Index([("a", "b"), ("b", "c"), ("c", "a")], tupleize_cols=False)
result = idx == ("c", "a")
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
class TestIndexUtils:
@pytest.mark.parametrize(
"data, names, expected",
[
([[1, 2, 3]], None, Index([1, 2, 3])),
([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")),
(
[["a", "a"], ["c", "d"]],
None,
MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]]),
),
(
[["a", "a"], ["c", "d"]],
["L1", "L2"],
MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]], names=["L1", "L2"]),
),
],
)
def test_ensure_index_from_sequences(self, data, names, expected):
result = ensure_index_from_sequences(data, names)
tm.assert_index_equal(result, expected)
def test_ensure_index_mixed_closed_intervals(self):
# GH27172
intervals = [
pd.Interval(0, 1, closed="left"),
pd.Interval(1, 2, closed="right"),
pd.Interval(2, 3, closed="neither"),
pd.Interval(3, 4, closed="both"),
]
result = ensure_index(intervals)
expected = Index(intervals, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"opname",
[
"eq",
"ne",
"le",
"lt",
"ge",
"gt",
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"truediv",
"rtruediv",
"floordiv",
"rfloordiv",
"pow",
"rpow",
"mod",
"divmod",
],
)
def test_generated_op_names(opname, index):
if isinstance(index, ABCIndex) and opname == "rsub":
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = f"__{opname}__"
method = getattr(index, opname)
assert method.__name__ == opname
@pytest.mark.parametrize("index_maker", tm.index_subclass_makers_generator())
def test_index_subclass_constructor_wrong_kwargs(index_maker):
# GH #19348
with pytest.raises(TypeError, match="unexpected keyword argument"):
index_maker(foo="bar")
def test_deprecated_fastpath():
msg = "[Uu]nexpected keyword argument"
with pytest.raises(TypeError, match=msg):
pd.Index(np.array(["a", "b"], dtype=object), name="test", fastpath=True)
with pytest.raises(TypeError, match=msg):
pd.Int64Index(np.array([1, 2, 3], dtype="int64"), name="test", fastpath=True)
with pytest.raises(TypeError, match=msg):
pd.RangeIndex(0, 5, 2, name="test", fastpath=True)
with pytest.raises(TypeError, match=msg):
pd.CategoricalIndex(["a", "b", "c"], name="test", fastpath=True)
def test_shape_of_invalid_index():
# Currently, it is possible to create "invalid" index objects backed by
# a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125
    # about this). However, as long as this is not solved in general, this test ensures
# that the returned shape is consistent with this underlying array for
# compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
idx = pd.Index([0, 1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
# GH#30588 multi-dimensional indexing deprecated
assert idx[:, None].shape == (4, 1)
def test_validate_1d_input():
# GH#27125 check that we do not have >1-dimensional input
msg = "Index data must be 1-dimensional"
arr = np.arange(8).reshape(2, 2, 2)
with pytest.raises(ValueError, match=msg):
pd.Index(arr)
with pytest.raises(ValueError, match=msg):
pd.Float64Index(arr.astype(np.float64))
with pytest.raises(ValueError, match=msg):
pd.Int64Index(arr.astype(np.int64))
with pytest.raises(ValueError, match=msg):
pd.UInt64Index(arr.astype(np.uint64))
df = pd.DataFrame(arr.reshape(4, 2))
with pytest.raises(ValueError, match=msg):
pd.Index(df)
# GH#13601 trying to assign a multi-dimensional array to an index is not
# allowed
ser = pd.Series(0, range(4))
with pytest.raises(ValueError, match=msg):
ser.index = np.array([[2, 3]] * 4)
def test_convert_almost_null_slice(index):
# slice with None at both ends, but not step
key = slice(None, None, "foo")
if isinstance(index, pd.IntervalIndex):
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
index._convert_slice_indexer(key, "loc")
else:
msg = "'>=' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
index._convert_slice_indexer(key, "loc")
dtlike_dtypes = [
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
pd.DatetimeTZDtype("ns", "Asia/Tokyo"),
pd.PeriodDtype("ns"),
]
@pytest.mark.parametrize("ldtype", dtlike_dtypes)
@pytest.mark.parametrize("rdtype", dtlike_dtypes)
def test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype):
vals = np.tile(3600 * 10 ** 9 * np.arange(3), 2)
def construct(dtype):
if dtype is dtlike_dtypes[-1]:
# PeriodArray will try to cast ints to strings
return pd.DatetimeIndex(vals).astype(dtype)
return pd.Index(vals, dtype=dtype)
left = construct(ldtype)
right = construct(rdtype)
result = left.get_indexer_non_unique(right)
if ldtype is rdtype:
ex1 = np.array([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp)
ex2 = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result[0], ex1)
tm.assert_numpy_array_equal(result[1], ex2)
else:
no_matches = np.array([-1] * 6, dtype=np.intp)
tm.assert_numpy_array_equal(result[0], no_matches)
tm.assert_numpy_array_equal(result[1], no_matches)
| bsd-3-clause |
healpy/healpy | healpy/newvisufunc.py | 1 | 17516 | __all__ = ["projview", "newprojplot"]
import numpy as np
from .pixelfunc import ang2pix, npix2nside
from .rotator import Rotator
import matplotlib.pyplot as plt
from matplotlib.projections.geo import GeoAxes
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator
import warnings
class ThetaFormatterCounterclockwisePhi(GeoAxes.ThetaFormatter):
"""Convert tick labels from rads to degs and shifts labelling from -180|-90|0|90|180 to conterclockwise periodic 180|90|0|270|180 """
def __call__(self, x, pos=None):
if x != 0:
x *= -1
if x < 0:
x += 2 * np.pi
return super(ThetaFormatterCounterclockwisePhi, self).__call__(x, pos)
class ThetaFormatterClockwisePhi(GeoAxes.ThetaFormatter):
"""Convert tick labels from rads to degs and shifts labelling from -180|-90|0|90|180 to clockwise periodic 180|270|0|90|180 """
def __call__(self, x, pos=None):
if x < 0:
x += 2 * np.pi
# return super(ThetaFormatterShiftPhi, self).__call__(x, pos)
return super(ThetaFormatterClockwisePhi, self).__call__(x, pos)
class ThetaFormatterSymmetricPhi(GeoAxes.ThetaFormatter):
"""Just convert phi ticks from rad to degs and keep the true -180|-90|0|90|180 """
def __call__(self, x, pos=None):
return super(ThetaFormatterSymmetricPhi, self).__call__(x, pos)
class ThetaFormatterTheta(GeoAxes.ThetaFormatter):
"""Convert theta ticks from rads to degs"""
def __call__(self, x, pos=None):
return super(ThetaFormatterTheta, self).__call__(x, pos)
def lonlat(theta, phi):
"""Converts theta and phi to longitude and latitude"""
longitude = np.asarray(phi)
latitude = np.pi / 2 - np.asarray(theta)
return longitude, latitude
def update_dictionary(main_dict, update_dict):
for key, key_val in main_dict.items():
if key in update_dict:
main_dict[key] = update_dict[key]
return main_dict
def projview(
m=None,
rot=None,
coord=None,
unit="",
xsize=1000,
nest=False,
min=None,
max=None,
flip="astro",
format="%g",
cbar=True,
cmap="viridis",
norm=None,
graticule=False,
graticule_labels=False,
return_only_data=False,
projection_type="mollweide",
cb_orientation="horizontal",
xlabel=None,
ylabel=None,
longitude_grid_spacing=60,
latitude_grid_spacing=30,
override_plot_properties=None,
title=None,
xtick_label_color="black",
ytick_label_color="black",
graticule_color=None,
fontsize=None,
phi_convention="counterclockwise",
custom_xtick_labels=None,
custom_ytick_labels=None,
**kwargs
):
"""Plot a healpix map (given as an array) in the chosen projection.
See examples of using this function in the documentation under "Other tutorials".
Overplot points or lines using :func:`newprojplot`.
.. warning::
this function is work in progress, the aim is to reimplement the healpy
plot functions using the new features of matplotlib and remove most
of the custom projection code.
Please report bugs or submit feature requests via Github.
The interface will change in future releases.
Parameters
----------
map : float, array-like or None
An array containing the map, supports masked maps, see the `ma` function.
If None, will display a blank map, useful for overplotting.
rot : scalar or sequence, optional
Describe the rotation to apply.
In the form (lon, lat, psi) (unit: degrees) : the point at
longitude *lon* and latitude *lat* will be at the center. An additional rotation
of angle *psi* around this direction is applied.
coord : sequence of character, optional
Either one of 'G', 'E' or 'C' to describe the coordinate
system of the map, or a sequence of 2 of these to rotate
the map from the first to the second coordinate system.
unit : str, optional
A text describing the unit of the data. Default: ''
xsize : int, optional
        The size of the image. Default: 1000
nest : bool, optional
If True, ordering scheme is NESTED. Default: False (RING)
min : float, optional
The minimum range value
max : float, optional
The maximum range value
flip : {'astro', 'geo'}, optional
Defines the convention of projection : 'astro' (default, east towards left, west towards right)
        or 'geo' (east towards right, west towards left)
It creates the `healpy_flip` attribute on the Axes to save the convention in the figure.
format : str, optional
The format of the scale label. Default: '%g'
cbar : bool, optional
Display the colorbar. Default: True
norm : {'hist', 'log', None}
Color normalization, hist= histogram equalized color mapping,
log= logarithmic color mapping, default: None (linear color mapping)
kwargs : keywords
any additional keyword is passed to pcolormesh
graticule : bool
add graticule
graticule_labels : bool
longitude and latitude labels
projection_type : {'aitoff', 'hammer', 'lambert', 'mollweide', 'cart', '3d', 'polar'}
type of the plot
cb_orientation : {'horizontal', 'vertical'}
color bar orientation
xlabel : str
set x axis label
ylabel : str
set y axis label
longitude_grid_spacing : float
set x axis grid spacing
latitude_grid_spacing : float
set y axis grid spacing
override_plot_properties : dict
        Override the following plot properties: "cbar_shrink", "cbar_pad", "cbar_label_pad", "figure_width": width, "figure_size_ratio": ratio.
title : str
set title of the plot
    xtick_label_color, ytick_label_color : str
        change the color of the longitude / latitude tick labels; some color maps make it hard to read black tick labels
fontsize: dict
Override fontsize of labels: "xlabel", "ylabel", "title", "xtick_label", "ytick_label", "cbar_label", "cbar_tick_label".
phi_convention : string
convention on x-axis (phi), 'counterclockwise' (default), 'clockwise', 'symmetrical' (phi as it is truly given)
if `flip` is "geo", `phi_convention` should be set to 'clockwise'.
custom_xtick_labels : list
override x-axis tick labels
custom_ytick_labels : list
override y-axis tick labels
"""
geographic_projections = ["aitoff", "hammer", "lambert", "mollweide"]
    if m is not None:
# auto min and max
if min is None:
min = m.min()
if max is None:
max = m.max()
    # do this to find how many decimals are in the colorbar labels, so that the padding in the vertical cbar can be done properly
def find_number_of_decimals(number):
try:
return len(str(number).split(".")[1])
        except IndexError:
return 0
# default font sizes
fontsize_defaults = {
"xlabel": 12,
"ylabel": 12,
"title": 14,
"xtick_label": 12,
"ytick_label": 12,
"cbar_label": 12,
"cbar_tick_label": 12,
}
if fontsize is not None:
fontsize_defaults = update_dictionary(fontsize_defaults, fontsize)
# default plot settings
decs = np.max([find_number_of_decimals(min), find_number_of_decimals(max)])
if decs >= 3:
lpad = -27
else:
lpad = -9 * decs
ratio = 0.63
if projection_type == "3d":
if cb_orientation == "vertical":
shrink = 0.55
pad = 0.02
lpad = lpad
width = 11.5
if cb_orientation == "horizontal":
shrink = 0.2
pad = 0
lpad = -10
width = 14
if projection_type in geographic_projections:
if cb_orientation == "vertical":
shrink = 0.6
pad = 0.01
lpad = lpad
width = 10
if cb_orientation == "horizontal":
shrink = 0.6
pad = 0.05
lpad = -8
width = 8.5
if projection_type == "cart":
if cb_orientation == "vertical":
shrink = 1
pad = 0.01
lpad = lpad
width = 9.6
ratio = 0.42
if cb_orientation == "horizontal":
shrink = 0.4
pad = 0.1
lpad = -12
width = 8.8
if xlabel == None:
pad = 0.01
ratio = 0.63
if projection_type == "polar":
if cb_orientation == "vertical":
shrink = 1
pad = 0.01
lpad = lpad
width = 10
if cb_orientation == "horizontal":
shrink = 0.4
pad = 0.01
lpad = 0
width = 12
# pass the default settings to the plot_properties dictionary
plot_properties = {
"cbar_shrink": shrink,
"cbar_pad": pad,
"cbar_label_pad": lpad,
"figure_width": width,
"figure_size_ratio": ratio,
}
if override_plot_properties is not None:
warnings.warn(
"\n *** Overriding default plot properies: " + str(plot_properties) + " ***"
)
plot_properties = update_dictionary(plot_properties, override_plot_properties)
warnings.warn("\n *** New plot properies: " + str(plot_properties) + " ***")
# not implemented features
    if norm is not None:
raise NotImplementedError()
# Create the figure
    if not return_only_data:  # suppress figure creation when only dumping the data
width = width # 8.5
fig = plt.figure(
figsize=(
plot_properties["figure_width"],
plot_properties["figure_width"] * plot_properties["figure_size_ratio"],
)
)
if projection_type == "cart":
ax = fig.add_subplot(111)
else:
ax = fig.add_subplot(111, projection=projection_type)
# FIXME: make a more general axes creation that works also with subplots
# ax = plt.gcf().add_axes((.125, .1, .9, .9), projection="mollweide")
# remove white space around the image
plt.subplots_adjust(left=0.02, right=0.98, top=0.95, bottom=0.05)
# end if not
if graticule and graticule_labels:
plt.subplots_adjust(left=0.04, right=0.98, top=0.95, bottom=0.05)
# allow callers to override the hold state by passing hold=True|False
# washold = ax.ishold() # commented out
hold = kwargs.pop("hold", None)
# if hold is not None:
# ax.hold(hold)
# try:
ysize = xsize // 2
theta = np.linspace(np.pi, 0, ysize)
phi = np.linspace(-np.pi, np.pi, xsize)
longitude = np.radians(np.linspace(-180, 180, xsize))
if flip == "astro":
longitude = longitude[::-1]
if not return_only_data:
# set property on ax so it can be used in newprojplot
ax.healpy_flip = flip
latitude = np.radians(np.linspace(-90, 90, ysize))
# project the map to a rectangular matrix xsize x ysize
PHI, THETA = np.meshgrid(phi, theta)
# coord or rotation
if coord or rot:
r = Rotator(coord=coord, rot=rot, inv=True)
THETA, PHI = r(THETA.flatten(), PHI.flatten())
THETA = THETA.reshape(ysize, xsize)
PHI = PHI.reshape(ysize, xsize)
    if m is not None:
        nside = npix2nside(len(m))
        grid_pix = ang2pix(nside, THETA, PHI, nest=nest)
        grid_map = m[grid_pix]
# plot
if return_only_data: # exit here when dumping the data
return [longitude, latitude, grid_map]
if projection_type != "3d": # test for 3d plot
ret = plt.pcolormesh(
longitude,
latitude,
grid_map,
vmin=min,
vmax=max,
rasterized=True,
cmap=cmap,
shading="auto",
**kwargs
)
elif projection_type == "3d": # test for 3d plot
LONGITUDE, LATITUDE = np.meshgrid(longitude, latitude)
ret = ax.plot_surface(
LONGITUDE,
LATITUDE,
grid_map,
cmap=cmap,
vmin=min,
vmax=max,
rasterized=True,
**kwargs
)
# graticule
if graticule_color is None:
plt.grid(graticule)
else:
plt.grid(graticule, color=graticule_color)
if graticule:
if projection_type in geographic_projections:
longitude_grid_spacing = longitude_grid_spacing # deg 60
ax.set_longitude_grid(longitude_grid_spacing)
ax.set_latitude_grid(latitude_grid_spacing)
ax.set_longitude_grid_ends(90)
else:
longitude_grid_spacing = longitude_grid_spacing # deg
latitude_grid_spacing = latitude_grid_spacing # deg
ax.xaxis.set_major_locator(
MultipleLocator(np.deg2rad(longitude_grid_spacing))
) # longitude
ax.yaxis.set_major_locator(
MultipleLocator(np.deg2rad(latitude_grid_spacing))
            )  # latitude
# labelling
    if graticule_labels and graticule:
if phi_convention == "counterclockwise":
xtick_formatter = ThetaFormatterCounterclockwisePhi(longitude_grid_spacing)
elif phi_convention == "clockwise":
xtick_formatter = ThetaFormatterClockwisePhi(longitude_grid_spacing)
elif phi_convention == "symmetrical":
xtick_formatter = ThetaFormatterSymmetricPhi(longitude_grid_spacing)
ax.xaxis.set_major_formatter(xtick_formatter)
ax.yaxis.set_major_formatter(ThetaFormatterTheta(latitude_grid_spacing))
if custom_xtick_labels is not None:
try:
ax.xaxis.set_ticklabels(custom_xtick_labels)
except:
warnings.warn(
"Put names for all "
+ str(len(ax.xaxis.get_ticklabels()))
+ " x-tick labels!. No re-labelling done."
)
if custom_ytick_labels is not None:
try:
ax.yaxis.set_ticklabels(custom_ytick_labels)
except:
warnings.warn(
"Put names for all "
+ str(len(ax.yaxis.get_ticklabels()))
+ " y-tick labels!. No re-labelling done."
)
if not graticule:
# remove longitude and latitude labels
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.tick_params(axis=u"both", which=u"both", length=0)
ax.set_title(title, fontsize=fontsize_defaults["title"])
# tick font size
ax.tick_params(
axis="x", labelsize=fontsize_defaults["xtick_label"], colors=xtick_label_color
)
ax.tick_params(
axis="y", labelsize=fontsize_defaults["ytick_label"], colors=ytick_label_color
)
# colorbar
if projection_type == "cart":
ax.set_aspect(1)
extend = "neither"
if min > np.min(m):
extend = "min"
if max < np.max(m):
extend = "max"
if min > np.min(m) and max < np.max(m):
extend = "both"
if cbar:
cb = fig.colorbar(
ret,
orientation=cb_orientation,
shrink=plot_properties["cbar_shrink"],
pad=plot_properties["cbar_pad"],
ticks=[min, max],
extend=extend,
)
if cb_orientation == "horizontal":
cb.ax.xaxis.set_label_text(unit, fontsize=fontsize_defaults["cbar_label"])
cb.ax.tick_params(axis="x", labelsize=fontsize_defaults["cbar_tick_label"])
cb.ax.xaxis.labelpad = plot_properties["cbar_label_pad"]
if cb_orientation == "vertical":
cb.ax.yaxis.set_label_text(unit, fontsize=fontsize_defaults["cbar_label"])
cb.ax.tick_params(axis="y", labelsize=fontsize_defaults["cbar_tick_label"])
cb.ax.yaxis.labelpad = plot_properties["cbar_label_pad"]
# workaround for issue with viewers, see colorbar docstring
cb.solids.set_edgecolor("face")
ax.set_xlabel(xlabel, fontsize=fontsize_defaults["xlabel"])
ax.set_ylabel(ylabel, fontsize=fontsize_defaults["ylabel"])
plt.draw()
# except:
# pass
return ret
def newprojplot(theta, phi, fmt=None, **kwargs):
"""newprojplot is a wrapper around :func:`matplotlib.Axes.plot` to support
colatitude theta and longitude phi and take into account the longitude convention
(see the `flip` keyword of :func:`projview`)
You can call this function as::
newprojplot(theta, phi) # plot a line going through points at coord (theta, phi)
newprojplot(theta, phi, 'bo') # plot 'o' in blue at coord (theta, phi)
Parameters
----------
theta, phi : float, array-like
Coordinates of point to plot in radians.
fmt : str
A format string (see :func:`matplotlib.Axes.plot` for details)
Notes
-----
Other keywords are passed to :func:`matplotlib.Axes.plot`.
"""
import matplotlib.pyplot as plt
ax = plt.gca()
flip = getattr(ax, "healpy_flip", "astro")
longitude, latitude = lonlat(theta, phi)
if flip == "astro":
longitude = longitude * -1
if fmt is None:
ret = plt.plot(longitude, latitude, **kwargs)
else:
ret = plt.plot(longitude, latitude, fmt, **kwargs)
return ret
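# --- Usage sketch (added example, not part of the original module) -----------
# A minimal, illustrative call sequence for projview/newprojplot; it assumes
# healpy is installed and uses an arbitrary NSIDE=32 map purely for demonstration.
#
#     import numpy as np
#     import healpy as hp
#     from healpy.newvisufunc import projview, newprojplot
#
#     m = np.arange(hp.nside2npix(32), dtype=float)      # dummy map
#     projview(m, graticule=True, graticule_labels=True,
#              unit="arbitrary", projection_type="mollweide")
#     newprojplot(theta=np.radians(60.0), phi=np.radians(30.0), fmt="ro")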
| gpl-2.0 |
adrienpacifico/openfisca-france | setup.py | 1 | 1776 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" -- a versatile microsimulation free software"""
from setuptools import setup, find_packages
setup(
name = 'OpenFisca-France',
version = '0.5.4.dev0',
author = 'OpenFisca Team',
author_email = 'contact@openfisca.fr',
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Information Analysis",
],
description = u'French tax and benefit system for OpenFisca',
keywords = 'benefit france microsimulation social tax',
license = 'http://www.fsf.org/licensing/licenses/agpl-3.0.html',
url = 'https://github.com/openfisca/openfisca-france',
data_files = [
('share/locale/fr/LC_MESSAGES', ['openfisca_france/i18n/fr/LC_MESSAGES/openfisca-france.mo']),
('share/openfisca/openfisca-france', ['CHANGELOG.md', 'LICENSE', 'README.md']),
],
extras_require = {
'inversion_revenus': [
'scipy >= 0.12',
],
'taxipp': [
'pandas >= 0.13',
],
'test': [
'nose',
],
},
include_package_data = True, # Will read MANIFEST.in
install_requires = [
'Babel >= 0.9.4',
'Biryani[datetimeconv] >= 0.10.4',
'numpy >= 1.6,< 1.10',
'OpenFisca-Core >= 0.5.0',
'PyYAML >= 3.10',
'requests >= 2.8',
],
message_extractors = {'openfisca_france': [
('**.py', 'python', None),
]},
packages = find_packages(exclude=['openfisca_france.tests*']),
test_suite = 'nose.collector',
)
| agpl-3.0 |
DGrady/pandas | pandas/tests/computation/test_compat.py | 11 | 1308 | import pytest
from distutils.version import LooseVersion
import pandas as pd
from pandas.core.computation.engines import _engines
import pandas.core.computation.expr as expr
from pandas.core.computation import _MIN_NUMEXPR_VERSION
def test_compat():
    # test we have compat with our version of numexpr
from pandas.core.computation import _NUMEXPR_INSTALLED
try:
import numexpr as ne
ver = ne.__version__
if ver < LooseVersion(_MIN_NUMEXPR_VERSION):
assert not _NUMEXPR_INSTALLED
else:
assert _NUMEXPR_INSTALLED
except ImportError:
pytest.skip("not testing numexpr version compat")
@pytest.mark.parametrize('engine', _engines)
@pytest.mark.parametrize('parser', expr._parsers)
def test_invalid_numexpr_version(engine, parser):
def testit():
a, b = 1, 2 # noqa
res = pd.eval('a + b', engine=engine, parser=parser)
assert res == 3
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
pytest.skip("no numexpr")
else:
if ne.__version__ < LooseVersion(_MIN_NUMEXPR_VERSION):
with pytest.raises(ImportError):
testit()
else:
testit()
else:
testit()
| bsd-3-clause |
zhenv5/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
jshleap/Collaboration | contactList/contacts-classification.py | 1 | 4165 | #!/usr/bin/python
'''
Utility scripts for contacts
Copyright (C) 2012 Alex Safatli, Christian Blouin, Jose Sergio Hleap
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
E-mail: iltafas@gmail.com
'''
import centroidContact
import getContactList
import sys
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Generates an adjacency list using the all-atom method
# (getContactList.py) and using the centroid method (centroidContact.py).
# Data for three plots are then found as follows:
#
# True Positive (TP): Number of contacts at a given threshold (also found with atom method).
# False Positive (FP): Number of contacts at a given threshold (not found in atom method).
# False Negative (FN): Number of contacts from atom method not predicted at a given threshold.
#
# "specificity" Sp = TP / (TP+FP)   (strictly speaking this is precision / positive predictive value)
# sensitivity Sv = TP / (TP+FN)
# F score = 2 * (Sp*Sv)/(Sp+Sv)
# If run from command line: $ python contacts-classification.py pdbFile.pdb
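# Worked example of the formulas above (illustrative numbers only):
#   TP, FP, FN = 80, 20, 10
#   Sp = 80 / (80 + 20)                         = 0.800
#   Sv = 80 / (80 + 10)                         = 0.889
#   F  = 2 * (0.800 * 0.889) / (0.800 + 0.889)  ~ 0.842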
fIn = sys.argv[1]
TPs = [] # List to hold True Positives.
FPs = [] # List to hold False Positives.
FNs = [] # List to hold False Negatives.
specificities = [] # List to hold the specificities for these cutoffs.
sensitivities = [] # List to hold the sensitivities for these cutoffs.
fScores = [] # List to hold the F Scores for these cutoffs.
cutoffs = [x*0.5 for x in xrange(6,41)] # Cutoffs ranging from 3 to 20, 0.5 increments.
# Get atom-based adjacency list.
print "\nLoading file: " + fIn
print "Will first generate atom-based contact list. This will take up to a few minutes.\n"
atomBased = getContactList.processFile(fIn)
REF = atomBased.adjList # Adjacency list.
# Get centroid-based adjacency lists. Calculate appropriately.
print "\nNow, will generate centroid-based adjacency lists. This will take a little while.\n"
for x in cutoffs:
print "\nCutoff = " + str(x) + "\n"
c = centroidContact.processFile(fIn,x)
TP = len(set(REF).intersection(set(c)))
FP = len(set(c).difference(set(REF)))
FN = len(set(REF).difference(set(c)))
TPs.append(TP)
FPs.append(FP)
FNs.append(FN)
Sp = float(TP)/(TP+FP)
Sv = float(TP)/(TP+FN)
specificities.append(Sp)
sensitivities.append(Sv)
# Avoid division by zero.
fScore = 0 if ((Sp+Sv) == 0) else (2.0*((Sp*Sv)/(Sp+Sv)))
fScores.append(fScore)
# Plot the data.
plt.plot(cutoffs,specificities)
plt.title("Specificities for Contact Determination Methods")
plt.ylabel("Specificity")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Sp-plot.pdf')
plt.savefig(pp, format='pdf')
pp.close()
plt.clf()
plt.plot(cutoffs,sensitivities)
plt.title("Sensitivities for Contact Determination Methods")
plt.ylabel("Sensitivity")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Sv-plot.pdf')
plt.savefig(pp, format='pdf')
plt.clf()
pp.close()
plt.plot(cutoffs,fScores)
plt.title("F Scores for Contact Determination Methods")
plt.ylabel("F Score")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Fscore-plot.pdf')
plt.savefig(pp, format='pdf')
pp.close()
# Save raw data to CSV file.
fout = open('classifications.csv','w')
fout.write("Cutoff (Angstroms)" + "\t" + "Specificity" + "\t"
+ "Sensitivity" + "\t" + "F Score" +
"\t" + "TP" + "\t" + "FP" + "\t" + "FN" + "\n")
for x in xrange(0,len(cutoffs)):
fout.write(str(cutoffs[x]) + "\t" + str(specificities[x]) +
"\t" + str(sensitivities[x]) + "\t" + str(fScores[x])
+ "\t" + str(TPs[x]) + "\t" + str(FPs[x])
+ "\t" + str(FNs[x]) + "\n")
fout.close() | gpl-3.0 |
rvraghav93/scikit-learn | sklearn/neighbors/approximate.py | 3 | 22554 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
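# Sketch of the prefix search above, written with 8-bit values for readability
# (the real hashes and masks are 32-bit). With h = 4 leading bits, the masks
# bracket every stored hash sharing the query's 4-bit prefix:
#
#     tree  = np.array([0b00010001, 0b00010111, 0b01000000], dtype='>u4')  # sorted
#     query = np.array([0b00011111], dtype='>u4')
#     left  = np.searchsorted(tree, query & 0b11110000)           # -> 0
#     right = np.searchsorted(tree, query | 0b00001111, 'right')  # -> 2
#     # tree[0:2] are the hashes whose 4-bit prefix matches the query.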
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=32,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
warnings.warn("LSHForest has poor performance and has been deprecated "
"in 0.19. It will be removed in version 0.21.",
DeprecationWarning)
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
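# Usage sketch (LSHForest is deprecated since 0.19; the parameters below are
# illustrative only and assume `X_train`, `X_test` and `X_new` are array-likes
# with the same number of features):
#
#     lshf = LSHForest(n_estimators=20, n_candidates=200, random_state=42)
#     lshf.fit(X_train)
#     dist, ind = lshf.radius_neighbors(X_test, radius=0.4)  # cosine distance <= 0.4
#     lshf.partial_fit(X_new)                                # index new points in batch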
| bsd-3-clause |
0todd0000/spm1d | spm1d/rft1d/examples/val_max_4_anova1_1d.py | 1 | 2121 |
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
eps = np.finfo(float).eps
def here_anova1(Y, X, X0, Xi, X0i, df):
Y = np.matrix(Y)
### estimate parameters:
b = Xi*Y
eij = Y - X*b
R = eij.T*eij
### reduced design:
b0 = X0i*Y
eij0 = Y - X0*b0
R0 = eij0.T*eij0
### compute F statistic:
F = ((np.diag(R0)-np.diag(R))/df[0]) / (np.diag(R+eps)/df[1])
return F
def here_design_matrices(nResponses, nGroups):
nTotal = sum(nResponses)
X = np.zeros((nTotal,nGroups))
i0 = 0
for i,n in enumerate(nResponses):
X[i0:i0+n,i] = 1
i0 += n
X = np.matrix(X)
X0 = np.matrix(np.ones(nTotal)).T #reduced design matrix
Xi,X0i = np.linalg.pinv(X), np.linalg.pinv(X0) #pseudo-inverses
return X,X0,Xi,X0i
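# Illustrative note (not in the original script): for nResponses=(2, 1) and
# nGroups=2, the full design matrix X is
#     [[1, 0],
#      [1, 0],
#      [0, 1]]
# while X0 is a single column of ones (the reduced, intercept-only model).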
#(0) Set parameters:
np.random.seed(123456789)
nResponses = 6,8,9 #number of responses in each group
nNodes = 101
FWHM = 12.0
nIterations = 5000
### derived parameters:
nGroups = len(nResponses)
nTotal = sum(nResponses)
df = nGroups-1, nTotal-nGroups
X,X0,Xi,X0i = here_design_matrices(nResponses, nGroups)
#(1) Generate Gaussian 1D fields, compute test stat, store field maximum:
F = []
generator = rft1d.random.Generator1D(nTotal, nNodes, FWHM)
for i in range(nIterations):
y = generator.generate_sample()
f = here_anova1(y, X, X0, Xi, X0i, df)
F.append( f.max() )
F = np.asarray(F)
#(2) Survival functions:
heights = np.linspace(6, 14, 21)
sf = np.array( [ (F>h).mean() for h in heights] )
sfE = rft1d.f.sf(heights, df, nNodes, FWHM) #theoretical
sf0D = rft1d.f.sf0d(heights, df) #theoretical (0D)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sf0D, 'r-', label='Theoretical (0D)')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (F_\mathrm{max} > u)$', size=20)
ax.legend()
ax.set_title('ANOVA validation (1D)', size=20)
pyplot.show()
| gpl-3.0 |
DrkVenom/roots | roots.py | 1 | 9713 | #Name: Tony Ranieri
#Created: October 2014
#Modified: August 2015
import numpy as np
import pylab as py
import matplotlib.pyplot as plt
def roots(f,df,a,b,niter,epsilon):
# Input
# f: the function that we need to find roots for
# df: derivative of the function f
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# perform bisect
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
if (fa*fb>0): #test to see if there is a single root in the bracket
print "There are either no roots in this bracket, or an even number of them. Please refine your bracket."
return 1
for i in range(niter):
xstar=(a+b)/2 #define xstar as the midpoint of the current bracket
fxstar=f(xstar) #set the value of our function at this new midpoint
err=abs(b-a)
if (fa*fxstar<0): #test to see if root is in [fa,fxstar]
b=xstar #if yes, set our upper bound to now be xstar
fb=fxstar #update the guess and iterate
elif (fb*fxstar<0): #test to see if root is in [fxstar,fb]
a=xstar #if yes, set our lower bound to now be xstar
fa=fxstar #update the guess and iterate
else:
a=xstar
b=xstar
print "Check the results carefully! One of your endpoints may be a root or 0 might be a root."
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level
break #if it is, we're done here
xstar_b=xstar
fxstar_b=f(xstar_b)
# perform Newton
x0=(a+b)/2 #need an initial guess, midpoint seems decent enough
fx0=f(x0) #define y-coord at x0 based on the given f
for i in range(niter):
dfx0=df(x0) #define derivative y-coord at x0 based on the given df
if (dfx0==0):
break
xstar=x0-fx0/dfx0 #set xstar as defined by Newton's method
err=abs(xstar-x0)
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
x0=xstar #update the initial guess and iterate
fx0=fxstar
if (i==niter):
break
xstar_n=xstar
fxstar_n=f(xstar_n)
# perform Secant
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
for i in range(niter):
if (fb==fa):
break
xstar=b-((fb*(b-a))/(fb-fa)) #set xstar as defined by secant method
err=abs(f(xstar))
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
a=b #update the initial guess and iterate
b=xstar #update the initial guess and iterate
fa=fb
fb=fxstar
if (i==niter) or (fb==fa):
break
xstar_s=xstar
fxstar_s=f(xstar_s)
#find best estimate for root by testing proximity to zero
if (abs(fxstar_b-0)<=abs(fxstar_n-0)):
if (abs(fxstar_b-0)==abs(fxstar_n-0)):
xstar=xstar_b
print "Bisect method and Newton method came to the same conclusion."
else:
if (abs(fxstar_b-0)<=abs(fxstar_s-0)):
if (abs(fxstar_b-0)==abs(fxstar_s-0)):
xstar=xstar_b
print "Bisect method and Secant method came to the same conclusion."
else:
xstar=xstar_b
print "Bisect method is superior."
else:
xstar=xstar_s
print "Secant method is superior."
else:
if (abs(fxstar_n-0)<=abs(fxstar_s-0)):
if (abs(fxstar_n-0)==abs(fxstar_s-0)):
xstar=xstar_n
print "Newton method and Secant method came to the same conclusion."
else:
xstar=xstar_n
print "Newton method is superior."
else:
xstar=xstar_s
print "Secant method is superior."
#plot function with identified root
#x=np.linspace(a, b, 200)
#plt.plot(x, f(x))
#plt.xlim(a-1, b+1)
#plt.xticks(np.linspace(a, b, 10, endpoint=True))
#plt.xlim(x.min()*1.1,x.max() * 1.1)
#plt.ylim(-5, 5)
#ax = plt.gca()
#ax.axes.get_yaxis().set_visible(False)
#ax.spines['right'].set_color('none')
#ax.spines['top'].set_color('none')
#ax.spines['left'].set_color('none')
#ax.xaxis.set_ticks_position('bottom')
# ax.spines['bottom'].set_position(('data',0))
#plt.show()
print "output = (value, bisect, newton, secant)"
return xstar, xstar_b, xstar_n, xstar_s
def bisect(f,a,b,niter,epsilon):
# Input
# f: the function that we need to find roots for
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
xseq=np.zeros(niter)
if (fa*fb>0): #test to see if there is a single root in the bracket
print "There are either no roots in this bracket, or an even number of them. Please refine your bracket."
return 1
for i in range(niter):
xstar=(a+b)/2 #define xstar as the midpoint of the current bracket
xseq[i]=xstar #add the value of xstar to this convergent sequence
fxstar=f(xstar) #set the value of our function at this new midpoint
err=abs(b-a)
if (fa*fxstar<0): #test to see if root is in [fa,fxstar]
b=xstar #if yes, set our upper bound to now be xstar
fb=fxstar #update the guess and iterate
elif (fb*fxstar<0): #test to see if root is in [fxstar,fb]
a=xstar #if yes, set our lower bound to now be xstar
fa=fxstar #update the guess and iterate
else:
a=xstar
b=xstar
print "Check the results carefully! One of your endpoints may be a root."
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level
break #if it is, we're done here
xstar=(a+b)/2
fxstar=f(xstar)
return xstar, err, fxstar, i+1, xseq[0:i]
def newton(f,df,x0,niter,epsilon):
# Input
# f: the function that we need to find roots for
# df: the derivative of the function f
# x0: initial guess for a root
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fx0=f(x0) #define y-coord at x0 based on the given f
xseq=np.zeros(niter+1) #need +1 as we already know the first entry is x0
xseq[0]=x0
for i in range(niter):
dfx0=df(x0) #define derivative y-coord at x0 based on the given df
xstar=x0-fx0/dfx0 #set xstar as defined by Newton's method
xseq[i+1]=xstar
err=abs(xstar-x0)
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
x0=xstar #update the initial guess and iterate
fx0=fxstar
        if (i==niter-1):
print "Newton's method failed to converge given the number of iterations."
break
return xstar, err, fxstar, i+1, xseq[0:(i+2)]
def secant(f,a,b,niter,epsilon):
# Input
# f: the function of interest
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
xseq=np.zeros(niter+1) #need +1 as we already know the first entry is x0
xseq[0]=a
xseq[1]=b
for i in range(niter):
xstar=b-((fb*(b-a))/(fb-fa)) #set xstar as defined by secant method
        xseq[i+2]=xstar #+2 as we already defined the first 2
err=abs(f(xstar))
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
a=b #update the initial guess and iterate
b=xstar #update the initial guess and iterate
fa=fb
fb=fxstar
        if (i==niter-1):
print "Secant's method failed to converge given the number of iterations."
break
return xstar, err, fxstar, i+1, xseq[0:(i+2)]
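if __name__ == "__main__":
    # Minimal usage sketch (added example, not part of the original module):
    # locate the root of f(x) = x**2 - 2 on [0, 2] with each solver.
    f = lambda x: x**2 - 2.0
    df = lambda x: 2.0*x
    print "bisect:", bisect(f, 0.0, 2.0, 100, 1e-10)[0]    # ~1.41421356
    print "newton:", newton(f, df, 1.0, 100, 1e-10)[0]     # ~1.41421356
    print "secant:", secant(f, 0.0, 2.0, 100, 1e-10)[0]    # ~1.41421356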
| gpl-2.0 |
olafhauk/mne-python | mne/utils/numerics.py | 4 | 36095 | # -*- coding: utf-8 -*-
"""Some utility functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
from contextlib import contextmanager
import hashlib
from io import BytesIO, StringIO
from math import sqrt
import numbers
import operator
import os
import os.path as op
from math import ceil
import shutil
import sys
from datetime import datetime, timedelta, timezone
import numpy as np
from scipy import sparse
from ._logging import logger, warn, verbose
from .check import check_random_state, _ensure_int, _validate_type
from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd
from .docs import fill_doc
def split_list(v, n, idx=False):
"""Split list in n (approx) equal pieces, possibly giving indices."""
n = int(n)
tot = len(v)
sz = tot // n
start = stop = 0
for i in range(n - 1):
stop += sz
yield (np.arange(start, stop), v[start:stop]) if idx else v[start:stop]
start += sz
    yield (np.arange(start, tot), v[start:]) if idx else v[start:]
def array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1):
"""Do what numpy.array_split does, but add indices."""
# this only works for indices_or_sections as int
indices_or_sections = _ensure_int(indices_or_sections)
ary_split = np.array_split(ary, indices_or_sections, axis=axis)
idx_split = np.array_split(np.arange(ary.shape[axis]), indices_or_sections)
idx_split = (np.arange(sp[0] * n_per_split, (sp[-1] + 1) * n_per_split)
for sp in idx_split)
return zip(idx_split, ary_split)
def create_chunks(sequence, size):
"""Generate chunks from a sequence.
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
def sum_squared(X):
"""Compute norm of an array.
Parameters
----------
X : array
Data whose norm must be found.
Returns
-------
value : float
Sum of squares of the input array X.
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
def _compute_row_norms(data):
"""Compute scaling based on estimated norm."""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
def _reg_pinv(x, reg=0, rank='full', rcond=1e-15):
"""Compute a regularized pseudoinverse of Hermitian matrices.
Regularization is performed by adding a constant value to each diagonal
element of the matrix before inversion. This is known as "diagonal
loading". The loading factor is computed as ``reg * np.trace(x) / len(x)``.
The pseudo-inverse is computed through SVD decomposition and inverting the
singular values. When the matrix is rank deficient, some singular values
will be close to zero and will not be used during the inversion. The number
of singular values to use can either be manually specified or automatically
estimated.
Parameters
----------
x : ndarray, shape (..., n, n)
Square, Hermitian matrices to invert.
reg : float
Regularization parameter. Defaults to 0.
rank : int | None | 'full'
This controls the effective rank of the covariance matrix when
computing the inverse. The rank can be set explicitly by specifying an
integer value. If ``None``, the rank will be automatically estimated.
Since applying regularization will always make the covariance matrix
full rank, the rank is estimated before regularization in this case. If
'full', the rank will be estimated after regularization and hence
will mean using the full rank, unless ``reg=0`` is used.
Defaults to 'full'.
rcond : float | 'auto'
Cutoff for detecting small singular values when attempting to estimate
the rank of the matrix (``rank='auto'``). Singular values smaller than
the cutoff are set to zero. When set to 'auto', a cutoff based on
floating point precision will be used. Defaults to 1e-15.
Returns
-------
x_inv : ndarray, shape (..., n, n)
The inverted matrix.
loading_factor : float
Value added to the diagonal of the matrix during regularization.
rank : int
If ``rank`` was set to an integer value, this value is returned,
else the estimated rank of the matrix, before regularization, is
returned.
"""
from ..rank import _estimate_rank_from_s
if rank is not None and rank != 'full':
rank = int(operator.index(rank))
if x.ndim < 2 or x.shape[-2] != x.shape[-1]:
raise ValueError('Input matrix must be square.')
if not np.allclose(x, x.conj().swapaxes(-2, -1)):
raise ValueError('Input matrix must be Hermitian (symmetric)')
assert x.ndim >= 2 and x.shape[-2] == x.shape[-1]
n = x.shape[-1]
# Decompose the matrix, not necessarily positive semidefinite
from mne.fixes import svd
U, s, Vh = svd(x, hermitian=True)
# Estimate the rank before regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_before = _estimate_rank_from_s(s, tol)
# Decompose the matrix again after regularization
loading_factor = reg * np.mean(s, axis=-1)
if reg:
U, s, Vh = svd(
x + loading_factor[..., np.newaxis, np.newaxis] * np.eye(n),
hermitian=True)
# Estimate the rank after regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_after = _estimate_rank_from_s(s, tol)
# Warn the user if both all parameters were kept at their defaults and the
# matrix is rank deficient.
if (rank_after < n).any() and reg == 0 and \
rank == 'full' and rcond == 1e-15:
warn('Covariance matrix is rank-deficient and no regularization is '
'done.')
elif isinstance(rank, int) and rank > n:
raise ValueError('Invalid value for the rank parameter (%d) given '
'the shape of the input matrix (%d x %d).' %
(rank, x.shape[0], x.shape[1]))
# Pick the requested number of singular values
mask = np.arange(s.shape[-1]).reshape((1,) * (x.ndim - 2) + (-1,))
if rank is None:
cmp = ret = rank_before
elif rank == 'full':
cmp = rank_after
ret = rank_before
else:
cmp = ret = rank
mask = mask < np.asarray(cmp)[..., np.newaxis]
mask &= s > 0
# Invert only non-zero singular values
s_inv = np.zeros(s.shape)
s_inv[mask] = 1. / s[mask]
# Compute the pseudo inverse
x_inv = np.matmul(U * s_inv[..., np.newaxis, :], Vh)
return x_inv, loading_factor, ret
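# Editor's illustrative sketch (not part of the original module); the
# `_example_*` name and the toy data are assumptions. It restates the recipe
# documented above -- diagonal loading followed by an SVD-based pseudo-inverse --
# on a small rank-deficient Hermitian matrix.
def _example_diagonal_loading_pinv(reg=0.1):  # pragma: no cover - illustration
    rng = np.random.RandomState(0)
    a = rng.randn(4, 2)
    cov = np.dot(a, a.T)                       # rank-deficient 4 x 4 covariance
    loading = reg * np.trace(cov) / len(cov)   # the documented loading factor
    u, s, vh = np.linalg.svd(cov + loading * np.eye(len(cov)))
    s_inv = np.zeros_like(s)
    keep = s > 1e-15 * s[0]                    # drop near-zero singular values
    s_inv[keep] = 1. / s[keep]
    return np.matmul(u * s_inv, vh)            # regularized pseudo-inverse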
def _gen_events(n_epochs):
"""Generate event structure from number of epochs."""
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
return events
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude."""
from ..epochs import _is_good
from ..io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
def _get_inst_data(inst):
"""Get data view from MNE object instance like Raw, Epochs or Evoked."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from .. import Evoked
from ..time_frequency.tfr import _BaseTFR
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), "Instance")
if not inst.preload:
inst.load_data()
return inst._data
def compute_corr(x, y):
"""Compute pearson correlations between a vector and a matrix."""
if len(x) == 0 or len(y) == 0:
raise ValueError('x or y has zero length')
X = np.array(x, float)
Y = np.array(y, float)
X -= X.mean(0)
Y -= Y.mean(0)
x_sd = X.std(0, ddof=1)
    # when X and Y have the same shape (the covariance matrix is fully
    # expanded), y_sd needs an extra axis for broadcasting; otherwise it is
    # used as-is
y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
@fill_doc
def random_permutation(n_samples, random_state=None):
"""Emulate the randperm matlab function.
    It returns a vector containing a random permutation of the
    integers between 0 and n_samples-1. It returns the same random numbers
    as MATLAB's randperm function whenever the random_state matches
    MATLAB's random seed.
This function is useful for comparing against matlab scripts
which use the randperm function.
Note: the randperm(n_samples) matlab function generates a random
sequence between 1 and n_samples, whereas
random_permutation(n_samples, random_state) function generates
a random sequence between 0 and n_samples-1, that is:
randperm(n_samples) = random_permutation(n_samples, random_state) - 1
Parameters
----------
    n_samples : int
        Number of elements to permute. The permutation covers the integers
        0 to n_samples-1, i.e. the end point n_samples itself is excluded.
%(random_state)s
Returns
-------
randperm : ndarray, int
        Randomly permuted sequence of the integers between 0 and n_samples-1.
"""
rng = check_random_state(random_state)
# This can't just be rng.permutation(n_samples) because it's not identical
# to what MATLAB produces
idx = rng.uniform(size=n_samples)
randperm = np.argsort(idx)
return randperm
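# Editor's illustrative sketch (not part of the original module); the
# `_example_*` helper is hypothetical. It exercises `random_permutation` and
# shows the off-by-one relationship to MATLAB's randperm documented above.
def _example_random_permutation():  # pragma: no cover - illustration
    perm = random_permutation(10, random_state=42)
    assert sorted(perm.tolist()) == list(range(10))  # a permutation of 0..9
    # per the docstring, MATLAB's randperm(10) with the same seed is perm + 1
    return perm + 1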
@verbose
def _apply_scaling_array(data, picks_list, scalings, verbose=None):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
logger.debug(' Scaling using mapping %s.' % (scalings,))
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
logger.debug(' Scaling using computed norms.')
data *= scalings[:, np.newaxis] # F - order
def _invert_scalings(scalings):
if isinstance(scalings, dict):
scalings = {k: 1. / v for k, v in scalings.items()}
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return scalings
def _undo_scaling_array(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_array(data, picks_list, scalings, verbose=False)
@contextmanager
def _scaled_array(data, picks_list, scalings):
"""Scale, use, unscale array."""
_apply_scaling_array(data, picks_list=picks_list, scalings=scalings)
try:
yield
finally:
_undo_scaling_array(data, picks_list=picks_list, scalings=scalings)
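# Editor's illustrative sketch (not part of the original module); the toy
# picks_list and scalings are assumptions. Shows the scale -> use -> unscale
# pattern that `_scaled_array` provides as a context manager.
def _example_scaled_array():  # pragma: no cover - illustration
    data = np.random.RandomState(0).randn(3, 5) * 1e-6  # EEG-like amplitudes (V)
    picks_list = [('eeg', np.arange(3))]                 # (channel type, row indices)
    with _scaled_array(data, picks_list, dict(eeg=1e6)):
        pass  # inside the block `data` is in microvolts
    return data  # the scaling has been undone in place on exit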
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
        raise RuntimeError('Invalid scalings: %r' % (scalings,))
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_cov(data, picks_list, scalings)
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, str) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
        raise NotImplementedError('Unsupported rescaling option: %s'
                                  % (scalings,))
return scalings_
def hashfunc(fname, block_size=1048576, hash_type="md5"): # 2 ** 20
"""Calculate the hash for a file.
Parameters
----------
fname : str
Filename.
block_size : int
        Block size to use when reading.
    hash_type : str
        The hash algorithm to use; either "md5" (default) or "sha1".
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
if hash_type == "md5":
hasher = hashlib.md5()
elif hash_type == "sha1":
hasher = hashlib.sha1()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
hasher.update(data)
return hasher.hexdigest()
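# Editor's illustrative sketch (not part of the original module); the temporary
# file is an assumption. Hashes the same file with both supported algorithms.
def _example_hashfunc():  # pragma: no cover - illustration
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as fid:
        fid.write(b'spam')
        fname = fid.name
    return hashfunc(fname), hashfunc(fname, hash_type='sha1')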
def _replace_md5(fname):
"""Replace a file based on MD5sum."""
# adapted from sphinx-gallery
assert fname.endswith('.new')
fname_old = fname[:-4]
if op.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old):
os.remove(fname)
else:
shutil.move(fname, fname_old)
def create_slices(start, stop, step=None, length=1):
"""Generate slices of time indexes.
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
    step : int | None
        Number of time samples separating two slices.
        If step is None, step is set to length.
    length : int
        Number of time samples included in a given slice.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
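# Editor's illustrative sketch (not part of the original module). Builds three
# consecutive, non-overlapping 10-sample windows over indices 0..29.
def _example_create_slices():  # pragma: no cover - illustration
    return create_slices(0, 30, length=10)
    # -> [slice(0, 10, 1), slice(10, 20, 1), slice(20, 30, 1)]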
def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True,
include_tmax=True):
"""Safely find sample boundaries."""
orig_tmin = tmin
orig_tmax = tmax
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
if not np.isfinite(tmin):
tmin = times[0]
if not np.isfinite(tmax):
tmax = times[-1]
include_tmax = True # ignore this param when tmax is infinite
if sfreq is not None:
# Push to a bit past the nearest sample boundary first
sfreq = float(sfreq)
tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq
tmax = int(round(tmax * sfreq)) / sfreq
tmax += (0.5 if include_tmax else -0.5) / sfreq
else:
assert include_tmax # can only be used when sfreq is known
if raise_error and tmin > tmax:
raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'
% (orig_tmin, orig_tmax))
mask = (times >= tmin)
mask &= (times <= tmax)
if raise_error and not mask.any():
extra = '' if include_tmax else 'when include_tmax=False '
raise ValueError('No samples remain when using tmin=%s and tmax=%s %s'
'(original time bounds are [%s, %s])'
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
return mask
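# Editor's illustrative sketch (not part of the original module); the toy time
# axis is an assumption. Selects the samples between 0.2 s and 0.5 s at 10 Hz.
def _example_time_mask():  # pragma: no cover - illustration
    times = np.arange(0., 1., 0.1)  # 10 samples at 10 Hz
    mask = _time_mask(times, tmin=0.2, tmax=0.5, sfreq=10.)
    return times[mask]              # approximately [0.2, 0.3, 0.4, 0.5]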
def _freq_mask(freqs, sfreq, fmin=None, fmax=None, raise_error=True):
"""Safely find frequency boundaries."""
orig_fmin = fmin
orig_fmax = fmax
fmin = -np.inf if fmin is None else fmin
fmax = np.inf if fmax is None else fmax
if not np.isfinite(fmin):
fmin = freqs[0]
if not np.isfinite(fmax):
fmax = freqs[-1]
if sfreq is None:
raise ValueError('sfreq can not be None')
# Push 0.5/sfreq past the nearest frequency boundary first
sfreq = float(sfreq)
fmin = int(round(fmin * sfreq)) / sfreq - 0.5 / sfreq
fmax = int(round(fmax * sfreq)) / sfreq + 0.5 / sfreq
if raise_error and fmin > fmax:
raise ValueError('fmin (%s) must be less than or equal to fmax (%s)'
% (orig_fmin, orig_fmax))
mask = (freqs >= fmin)
mask &= (freqs <= fmax)
if raise_error and not mask.any():
raise ValueError('No frequencies remain when using fmin=%s and '
'fmax=%s (original frequency bounds are [%s, %s])'
% (orig_fmin, orig_fmax, freqs[0], freqs[-1]))
return mask
def grand_average(all_inst, interpolate_bads=True, drop_bads=True):
"""Make grand average of a list of Evoked or AverageTFR data.
For :class:`mne.Evoked` data, the function interpolates bad channels based
on the ``interpolate_bads`` parameter. If ``interpolate_bads`` is True,
the grand average file will contain good channels and the bad channels
interpolated from the good MEG/EEG channels.
For :class:`mne.time_frequency.AverageTFR` data, the function takes the
subset of channels not marked as bad in any of the instances.
The ``grand_average.nave`` attribute will be equal to the number
of evoked datasets used to calculate the grand average.
.. note:: A grand average evoked should not be used for source
localization.
Parameters
----------
all_inst : list of Evoked or AverageTFR
The evoked datasets.
interpolate_bads : bool
If True, bad MEG and EEG channels are interpolated. Ignored for
AverageTFR.
drop_bads : bool
If True, drop all bad channels marked as bad in any data set.
If neither interpolate_bads nor drop_bads is True, in the output file,
every channel marked as bad in at least one of the input files will be
marked as bad, but no interpolation or dropping will be performed.
Returns
-------
grand_average : Evoked | AverageTFR
The grand average data. Same type as input.
Notes
-----
.. versionadded:: 0.11.0
"""
# check if all elements in the given list are evoked data
from ..evoked import Evoked
from ..time_frequency import AverageTFR
from ..channels.channels import equalize_channels
if not all_inst:
raise ValueError('Please pass a list of Evoked or AverageTFR objects.')
elif len(all_inst) == 1:
warn('Only a single dataset was passed to mne.grand_average().')
inst_type = type(all_inst[0])
_validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements')
for inst in all_inst:
_validate_type(inst, inst_type, 'All elements', 'of the same type')
# Copy channels to leave the original evoked datasets intact.
all_inst = [inst.copy() for inst in all_inst]
# Interpolates if necessary
if isinstance(all_inst[0], Evoked):
if interpolate_bads:
all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0
else inst for inst in all_inst]
from ..evoked import combine_evoked as combine
else: # isinstance(all_inst[0], AverageTFR):
from ..time_frequency.tfr import combine_tfr as combine
if drop_bads:
bads = list({b for inst in all_inst for b in inst.info['bads']})
if bads:
for inst in all_inst:
inst.drop_channels(bads)
equalize_channels(all_inst, copy=False)
# make grand_average object using combine_[evoked/tfr]
grand_average = combine(all_inst, weights='equal')
# change the grand_average.nave to the number of Evokeds
grand_average.nave = len(all_inst)
# change comment field
grand_average.comment = "Grand average (n = %d)" % grand_average.nave
return grand_average
def object_hash(x, h=None):
"""Hash a reasonable python object.
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if hasattr(x, 'keys'):
# dict-like types
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (str, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, (np.ndarray, np.number, np.bool_)):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tobytes())
elif isinstance(x, datetime):
        object_hash(_dt_to_stamp(x), h)
elif hasattr(x, '__len__'):
# all other list-like types
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
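# Editor's illustrative sketch (not part of the original module). Two
# structurally equal nested objects hash to the same digest.
def _example_object_hash():  # pragma: no cover - illustration
    a = dict(x=np.arange(3), y=['spam', 1.5])
    b = dict(x=np.arange(3), y=['spam', 1.5])
    assert object_hash(a) == object_hash(b)
    return object_hash(a)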
def object_size(x, memo=None):
"""Estimate the size of a reasonable python object.
Parameters
----------
x : object
Object to approximate the size of.
Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
memo : dict | None
The memodict.
Returns
-------
size : int
The estimated size in bytes of the object.
"""
    # Note: this will not process object arrays properly (since those only
    # hold references)
if memo is None:
memo = dict()
id_ = id(x)
if id_ in memo:
return 0 # do not add already existing ones
if isinstance(x, (bytes, str, int, float, type(None))):
size = sys.getsizeof(x)
elif isinstance(x, np.ndarray):
# On newer versions of NumPy, just doing sys.getsizeof(x) works,
# but on older ones you always get something small :(
size = sys.getsizeof(np.array([]))
if x.base is None or id(x.base) not in memo:
size += x.nbytes
elif isinstance(x, np.generic):
size = x.nbytes
elif isinstance(x, dict):
size = sys.getsizeof(x)
for key, value in x.items():
size += object_size(key, memo)
size += object_size(value, memo)
elif isinstance(x, (list, tuple)):
size = sys.getsizeof(x) + sum(object_size(xx, memo) for xx in x)
elif isinstance(x, datetime):
size = object_size(_dt_to_stamp(x), memo)
elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):
size = sum(sys.getsizeof(xx)
for xx in [x, x.data, x.indices, x.indptr])
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
memo[id_] = size
return size
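# Editor's illustrative sketch (not part of the original module). The array
# dominates the estimate (about 8000 bytes for 1000 float64 values plus
# container overhead).
def _example_object_size():  # pragma: no cover - illustration
    x = dict(a=np.zeros(1000), b='spam')
    return object_size(x)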
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def _array_equal_nan(a, b):
try:
np.testing.assert_array_equal(a, b)
except AssertionError:
return False
return True
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables.
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
# Deal with NamedInt and NamedFloat
for sub in (int, float):
if isinstance(a, sub) and isinstance(b, sub):
break
else:
return pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
if isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' left missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' right missing key %s\n' % key
else:
out += object_diff(a[key], b[key],
pre=(pre + '[%s]' % repr(key)))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for ii, (xx1, xx2) in enumerate(zip(a, b)):
out += object_diff(xx1, xx2, pre + '[%s]' % ii)
elif isinstance(a, float):
if not _array_equal_nan(a, b):
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif isinstance(a, (str, int, bytes, np.generic)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' left is None, right is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not _array_equal_nan(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif isinstance(a, datetime):
if (a - b).total_seconds() != 0:
out += pre + ' datetime mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
elif hasattr(a, '__getstate__'):
out += object_diff(a.__getstate__(), b.__getstate__(), pre)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
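# Editor's illustrative sketch (not part of the original module). The returned
# string points at the differing key ('y') and leaves the matching array alone.
def _example_object_diff():  # pragma: no cover - illustration
    a = dict(x=np.arange(3), y=1.)
    b = dict(x=np.arange(3), y=2.)
    return object_diff(a, b)  # e.g. "['y'] value mismatch (1.0, 2.0)"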
class _PCA(object):
"""Principal component analysis (PCA)."""
# Adapted from sklearn and stripped down to just use linalg.svd
# and make it easier to later provide a "center" option if we want
def __init__(self, n_components=None, whiten=False):
self.n_components = n_components
self.whiten = whiten
def fit_transform(self, X, y=None):
X = X.copy()
U, S, _ = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
            # X_new = X * V / S * sqrt(n_samples - 1) = U * sqrt(n_samples - 1)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
if self.n_components is None:
n_components = min(X.shape)
else:
n_components = self.n_components
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, (numbers.Integral, np.integer)):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = _safe_svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, V
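# Editor's illustrative sketch (not part of the original module); the toy data
# are assumptions. Reduces 5 standardized features to 2 whitened components.
def _example_pca():  # pragma: no cover - illustration
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    pca = _PCA(n_components=2, whiten=True)
    return pca.fit_transform(X).shape  # (100, 2)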
def _mask_to_onsets_offsets(mask):
"""Group boolean mask into contiguous onset:offset pairs."""
assert mask.dtype == bool and mask.ndim == 1
mask = mask.astype(int)
diff = np.diff(mask)
onsets = np.where(diff > 0)[0] + 1
if mask[0]:
onsets = np.concatenate([[0], onsets])
offsets = np.where(diff < 0)[0] + 1
if mask[-1]:
offsets = np.concatenate([offsets, [len(mask)]])
assert len(onsets) == len(offsets)
return onsets, offsets
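# Editor's illustrative sketch (not part of the original module). The two True
# runs in the mask come back as half-open [onset, offset) index pairs.
def _example_mask_to_onsets_offsets():  # pragma: no cover - illustration
    mask = np.array([False, True, True, False, True])
    onsets, offsets = _mask_to_onsets_offsets(mask)
    return list(zip(onsets, offsets))  # [(1, 3), (4, 5)]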
def _julian_to_dt(jd):
"""Convert Julian integer to a datetime object.
Parameters
----------
jd : int
        Julian date, i.e. the number of days since Julian day 0, which began
        at noon on January 1, 4713 BC (proleptic Julian calendar), equivalently
        November 24, 4714 BC (proleptic Gregorian calendar).
Returns
-------
jd_date : datetime
Datetime representation of jd
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = timedelta(days=(jd - jd_t0))
return datetime_t0 + dt
def _dt_to_julian(jd_date):
"""Convert datetime object to a Julian integer.
Parameters
----------
    jd_date : datetime
        Datetime object to convert.
Returns
-------
jd : float
        Julian date corresponding to jd_date, i.e. the number of days since
        Julian day 0, which began at noon on January 1, 4713 BC (proleptic
        Julian calendar), equivalently November 24, 4714 BC (proleptic
        Gregorian calendar).
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = jd_date - datetime_t0
return jd_t0 + dt.days
def _cal_to_julian(year, month, day):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd: int
Julian date.
"""
return int(_dt_to_julian(datetime(year, month, day, 12, 0, 0,
tzinfo=timezone.utc)))
def _julian_to_cal(jd):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
    jd : int | float
Julian date.
Returns
-------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
"""
tmp_date = _julian_to_dt(jd)
return tmp_date.year, tmp_date.month, tmp_date.day
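# Editor's illustrative sketch (not part of the original module). Round-trips
# the Unix epoch through the Julian-day helpers above.
def _example_julian_roundtrip():  # pragma: no cover - illustration
    jd = _cal_to_julian(1970, 1, 1)
    assert jd == 2440588                      # Julian day of 1970-01-01 (noon UTC)
    assert _julian_to_cal(jd) == (1970, 1, 1)
    return jd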
def _check_dt(dt):
if not isinstance(dt, datetime) or dt.tzinfo is None or \
dt.tzinfo is not timezone.utc:
raise ValueError('Date must be datetime object in UTC: %r' % (dt,))
def _dt_to_stamp(inp_date):
"""Convert a datetime object to a timestamp."""
_check_dt(inp_date)
return int(inp_date.timestamp() // 1), inp_date.microsecond
def _stamp_to_dt(utc_stamp):
"""Convert timestamp to datetime object in Windows-friendly way."""
# The min on windows is 86400
stamp = [int(s) for s in utc_stamp]
if len(stamp) == 1: # In case there is no microseconds information
stamp.append(0)
return (datetime.fromtimestamp(0, tz=timezone.utc) +
timedelta(0, stamp[0], stamp[1])) # day, sec, µs
class _ReuseCycle(object):
"""Cycle over a variable, preferring to reuse earlier indices.
Requires the values in ``x`` to be hashable and unique. This holds
nicely for matplotlib's color cycle, which gives HTML hex color strings.
"""
def __init__(self, x):
self.indices = list()
self.popped = dict()
assert len(x) > 0
self.x = x
def __iter__(self):
while True:
yield self.__next__()
def __next__(self):
if not len(self.indices):
self.indices = list(range(len(self.x)))
self.popped = dict()
idx = self.indices.pop(0)
val = self.x[idx]
assert val not in self.popped
self.popped[val] = idx
return val
def restore(self, val):
try:
idx = self.popped.pop(val)
except KeyError:
warn('Could not find value: %s' % (val,))
else:
loc = np.searchsorted(self.indices, idx)
self.indices.insert(loc, idx)
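# Editor's illustrative sketch (not part of the original module). A restored
# value is handed out again before the cycle moves on to later entries.
def _example_reuse_cycle():  # pragma: no cover - illustration
    cycle = _ReuseCycle(['a', 'b', 'c'])
    it = iter(cycle)
    assert next(it) == 'a'
    cycle.restore('a')
    assert next(it) == 'a'  # 'a' is reused before 'b'
    return cycle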
| bsd-3-clause |
costypetrisor/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
dssg/cincinnati2015-public | evaluation/webapp/evaluation.py | 1 | 5838 | from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from webapp import config
def weighted_f1(scores):
f1_0 = scores["f1"][0] * scores["support"][0]
f1_1 = scores["f1"][1] * scores["support"][1]
return (f1_0 + f1_1) / (scores["support"][0] + scores["support"][1])
def plot_normalized_confusion_matrix(labels, predictions):
cutoff = 0.5
predictions_binary = np.copy(predictions)
predictions_binary[predictions_binary >= cutoff] = 1
predictions_binary[predictions_binary < cutoff] = 0
cm = metrics.confusion_matrix(labels, predictions_binary)
np.set_printoptions(precision=2)
fig = plt.figure()
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
target_names = ["No violation", "Violation"]
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Normalized Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return fig
def plot_feature_importances(feature_names, feature_importances):
importances = list(zip(feature_names, list(feature_importances)))
importances = pd.DataFrame(importances, columns=["Feature", "Importance"])
importances = importances.set_index("Feature")
importances = importances.sort(columns="Importance", ascending=False)
importances = importances[0:20]
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
importances.plot(kind="barh", legend=False, ax=ax)
plt.tight_layout()
plt.title("Feature importances (Top 20)")
return fig
def plot_growth(results):
results = pd.DataFrame(results, columns=["date", "score"])
results = results.set_index("date")
results["score"] = results["score"].astype(float)
results = results.reindex(pd.date_range(datetime(2015, 7, 28), datetime(2015, 8, 27)))
results["random"] = pd.Series(3409/float(6124), index=results.index)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(8, 3))
results["score"].plot(legend=False, ax=ax, marker="x")
results["random"].plot(legend=False, ax=ax, style='--')
ax.set_ylabel(config.score_name)
plt.tight_layout()
ax.set_ylim(0.5, 1.0)
return fig
def precision_at_x_percent(test_labels, test_predictions, x_percent=0.01, return_cutoff=False):
cutoff_index = int(len(test_predictions) * x_percent)
cutoff_index = min(cutoff_index, len(test_predictions) -1)
sorted_by_probability = np.sort(test_predictions)[::-1]
cutoff_probability = sorted_by_probability[cutoff_index]
test_predictions_binary = np.copy(test_predictions)
test_predictions_binary[test_predictions_binary >= cutoff_probability] = 1
test_predictions_binary[test_predictions_binary < cutoff_probability] = 0
precision, _, _, _ = metrics.precision_recall_fscore_support(test_labels, test_predictions_binary)
precision = precision[1] # only interested in precision for label 1
if return_cutoff:
return precision, cutoff_probability
else:
return precision
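# Editor's illustrative sketch (not part of the original module); the labels and
# scores are made up. The score at the top-x-percent rank sets the probability
# cutoff, and precision is computed over everything at or above it.
def _example_precision_at_x_percent():  # pragma: no cover - illustration
    labels = np.array([1, 0, 1, 0])
    scores = np.array([0.9, 0.8, 0.3, 0.1])
    return precision_at_x_percent(labels, scores, x_percent=0.5)  # 2/3 here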
def plot_precision_recall_n(test_labels, test_predictions):
y_score = test_predictions
precision_curve, recall_curve, pr_thresholds = metrics.precision_recall_curve(test_labels, y_score)
precision_curve = precision_curve[:-1]
recall_curve = recall_curve[:-1]
pct_above_per_thresh = []
number_scored = len(y_score)
for value in pr_thresholds:
num_above_thresh = len(y_score[y_score>=value])
pct_above_thresh = num_above_thresh / float(number_scored)
pct_above_per_thresh.append(pct_above_thresh)
pct_above_per_thresh = np.array(pct_above_per_thresh)
with plt.style.context(('ggplot')):
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, precision_curve, "#000099")
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, "#CC0000")
ax2.set_ylabel('recall', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision-recall for top x%")
return fig
def plot_precision_cutoff(test_labels, test_predictions):
percent_range = [0.001* i for i in range(1, 10)] + [0.01 * i for i in range(1, 101)]
precisions_and_cutoffs = [precision_at_x_percent(test_labels, test_predictions, x_percent=p, return_cutoff=True)
for p in percent_range]
precisions, cutoffs = zip(*precisions_and_cutoffs)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
ax.plot(percent_range, precisions, "#000099")
ax.set_xlabel('percent of population')
ax.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax.twinx()
ax2.plot(percent_range, cutoffs, "#CC0000")
ax2.set_ylabel('cutoff at', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision at x%")
return fig
def plot_ROC(test_labels, test_predictions):
fpr, tpr, thresholds = metrics.roc_curve(test_labels, test_predictions, pos_label=1)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
        ax.plot(fpr, tpr)
#ax.plot([0, 1], [0, 1], 'k--')
#plt.xlim([0.0, 1.0])
#plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
return fig
| mit |
mfjb/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 truly informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # Note: 'n' from the enumerate loop above equals len(cv) - 1 here;
        # normalize the summed scores by the number of CV folds, len(cv)
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
aavanian/bokeh | examples/app/crossfilter/main.py | 5 | 2462 | import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import Select
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from bokeh.sampledata.autompg import autompg_clean as df
df = df.copy()
SIZES = list(range(6, 22, 3))
COLORS = Spectral5
N_SIZES = len(SIZES)
N_COLORS = len(COLORS)
# data cleanup
df.cyl = df.cyl.astype(str)
df.yr = df.yr.astype(str)
del df['name']
columns = sorted(df.columns)
discrete = [x for x in columns if df[x].dtype == object]
continuous = [x for x in columns if x not in discrete]
def create_figure():
xs = df[x.value].values
ys = df[y.value].values
x_title = x.value.title()
y_title = y.value.title()
kw = dict()
if x.value in discrete:
kw['x_range'] = sorted(set(xs))
if y.value in discrete:
kw['y_range'] = sorted(set(ys))
kw['title'] = "%s vs %s" % (x_title, y_title)
p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,hover,reset', **kw)
p.xaxis.axis_label = x_title
p.yaxis.axis_label = y_title
if x.value in discrete:
p.xaxis.major_label_orientation = pd.np.pi / 4
sz = 9
if size.value != 'None':
if len(set(df[size.value])) > N_SIZES:
groups = pd.qcut(df[size.value].values, N_SIZES, duplicates='drop')
else:
groups = pd.Categorical(df[size.value])
sz = [SIZES[xx] for xx in groups.codes]
c = "#31AADE"
if color.value != 'None':
if len(set(df[color.value])) > N_SIZES:
groups = pd.qcut(df[color.value].values, N_COLORS, duplicates='drop')
else:
groups = pd.Categorical(df[color.value])
c = [COLORS[xx] for xx in groups.codes]
p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)
return p
def update(attr, old, new):
layout.children[1] = create_figure()
x = Select(title='X-Axis', value='mpg', options=columns)
x.on_change('value', update)
y = Select(title='Y-Axis', value='hp', options=columns)
y.on_change('value', update)
size = Select(title='Size', value='None', options=['None'] + continuous)
size.on_change('value', update)
color = Select(title='Color', value='None', options=['None'] + continuous)
color.on_change('value', update)
controls = widgetbox([x, y, color, size], width=200)
layout = row(controls, create_figure())
curdoc().add_root(layout)
curdoc().title = "Crossfilter"
| bsd-3-clause |
rahulguptakota/paper-To-Reviewer-Matching-System | citeSentClassifier_gurki.py | 1 | 9088 | import xml.etree.ElementTree as ET
import re
import time
import os, csv
from nltk.tokenize import sent_tokenize
from textblob.classifiers import NaiveBayesClassifier
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from sklearn import naive_bayes
from random import shuffle
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from prettyprint import pp
import os, re, pickle
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.grid_search import GridSearchCV
from datetime import datetime as dt
from ipy_table import *
def testClassifier(x_train, y_train, x_test, y_test, clf, name):
"""
this method will first train the classifier on the training data
and will then test the trained classifier on test data.
Finally it will report some metrics on the classifier performance.
Parameters
----------
x_train: np.ndarray
train data matrix
y_train: list
train data label
x_test: np.ndarray
test data matrix
y_test: list
test data label
clf: sklearn classifier object implementing fit() and predict() methods
Returns
-------
metrics: list
[training time, testing time, recall and precision for every class, macro-averaged F1 score]
"""
print(name)
metrics = []
start = dt.now()
clf.fit(x_train, y_train)
end = dt.now()
print 'training time: ', (end - start)
pickle.dump( clf, open( name+".p", "wb" ) )
# add training time to metrics
metrics.append(end-start)
start = dt.now()
yhat = clf.predict(x_test)
end = dt.now()
print 'testing time: ', (end - start)
# add testing time to metrics
metrics.append(end-start)
print 'classification report: '
# print classification_report(y_test, yhat)
pp(classification_report(y_test, yhat))
print 'f1 score'
print f1_score(y_test, yhat, average='macro')
print 'accuracy score'
print accuracy_score(y_test, yhat)
precision = precision_score(y_test, yhat, average=None)
recall = recall_score(y_test, yhat, average=None)
# add precision and recall values to metrics
for p, r in zip(precision, recall):
metrics.append(p)
metrics.append(r)
#add macro-averaged F1 score to metrics
metrics.append(f1_score(y_test, yhat, average='macro'))
print 'confusion matrix:'
print confusion_matrix(y_test, yhat)
# plotting the confusion matrix
plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')
# plt.show()
return metrics
stop_words = set(stopwords.words('english'))
clfrNB = naive_bayes.MultinomialNB()
train = []
test = []
rootDir = './data_label'
one_label = 0
zero_label = 0
ps = PorterStemmer()
for dirName, subdirList, fileList in os.walk(rootDir, topdown=False):
try:
# print(dirName)
fo = open(dirName + "/citeSents.csv", "r")
except:
continue
lines = fo.readlines()
for line in lines:
line = line.strip().lower()
# print(line)
splitsent = line.split(",,")
# print(splitsent)
word_tokens = word_tokenize(splitsent[0])
if splitsent[1] != '1' and splitsent[1] != '0' :
print(splitsent)
elif splitsent[1] == "1":
one_label += 1
else:
zero_label += 1
filtered_sentence = [w for w in word_tokens if not w in stop_words]
line = " ".join(filtered_sentence)
stemmed = [ps.stem(word) for word in line.split()]
stemmed = filter(lambda x: not(len(x)<3 or re.findall(r"[0-9]+",x)) , stemmed)
stemmed = list(stemmed)
line = " ".join(stemmed)
# print(line)
train.append((line, splitsent[1]))
shuffle(train)
# testindex = int(len(train)*4/5)
# test = train[testindex:]
# train = train[:testindex]
train_arr = []
# test_arr = []
train_lbl = []
# test_lbl = []
for x in train:
train_arr.append(x[0])
train_lbl.append(x[1])
# for x in test:
# test_arr.append(x[0])
# test_lbl.append(x[1])
vectorizer = CountVectorizer()
vectorizer.fit(train_arr)
pickle.dump(vectorizer, open("vectorizer.p", "wb"))
train_mat = vectorizer.transform(train_arr)
print train_mat
# print train_mat.shape
# test_mat = vectorizer.transform(test_arr)
# print test_mat.shape
tfidf = TfidfTransformer()
tfidf.fit(train_mat)
pickle.dump(tfidf, open("tfidf.p", "wb"))
train_tfmat = tfidf.transform(train_mat)
print train_tfmat.shape
print train_tfmat[0]
# test_tfmat = tfidf.transform(test_mat)
# print test_tfmat.shape
testindex = int(len(train)*4/5)
test_tfmat = train_tfmat[testindex:]
test_lbl = train_lbl[testindex:]
train_tfmat = train_tfmat[:testindex]
train_lbl = train_lbl[:testindex]
metrics_dict = []
bnb = BernoulliNB()
bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, bnb, "bernoulliNB")
metrics_dict.append({'name':'BernoulliNB', 'metrics':bnb_me})
gnb = GaussianNB()
gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, gnb, "guassianNB")
metrics_dict.append({'name':'GaussianNB', 'metrics':gnb_me})
mnb = MultinomialNB()
mnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, mnb, "MultinomialNB")
metrics_dict.append({'name':'MultinomialNB', 'metrics':mnb_me})
for nn in [5]:
print 'knn with ', nn, ' neighbors'
knn = KNeighborsClassifier(n_neighbors=nn)
knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, knn, "knn"+str(nn))
metrics_dict.append({'name':'5NN', 'metrics':knn_me})
print ' '
print("linear SVM starts:")
lsvm = LinearSVC( class_weight={'1': 1, '0' : 1})
lsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, lsvm, "linearSVM")
metrics_dict.append({'name':'LinearSVM', 'metrics':lsvm_me})
rbfsvm = SVC(kernel='poly', degree=2, coef0=1, class_weight={'1': zero_label, '0': one_label})
rbfsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, rbfsvm, "rbfSVM")
metrics_dict.append({'name':'SVM with RBF kernel', 'metrics':rbfsvm_me})
bnb_params = {'alpha': [a*0.1 for a in range(0,11)]}
bnb_clf = GridSearchCV(BernoulliNB(), bnb_params, cv=10)
bnb_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print bnb_clf.best_params_
best_bnb = BernoulliNB(alpha=bnb_clf.best_params_['alpha'])
best_bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_bnb,"bernoulliNB")
metrics_dict.append({'name':'Best BernoulliNB', 'metrics':best_bnb_me})
best_gnb = GaussianNB()
best_gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, best_gnb, "guassianNB")
metrics_dict.append({'name':'Best GaussianNB', 'metrics':best_gnb_me})
mbn_params = {'alpha': [a*0.1 for a in range(0,11)]}
mbn_clf = GridSearchCV(MultinomialNB(), mbn_params, cv=10)
mbn_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print mbn_clf.best_params_
best_mbn = MultinomialNB(alpha=mbn_clf.best_params_['alpha'])
best_mbn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_mbn, "MultinomialNB")
metrics_dict.append({'name':'Best MultinomialNB', 'metrics':best_mbn_me})
print metrics_dict
# knn_params = {'n_neighbors': range(1,21), 'weights': ['uniform', 'distance'], 'algorithm': ['ball_tree', 'kd_tree'],
# 'leaf_size': [15, 30, 50, 100], 'p': [1,2]}
# knn_clf = GridSearchCV(KNeighborsClassifier(), knn_params, cv=10)
# knn_clf.fit(train_tfmat, train_lbl)
# print 'best parameters'
# print knn_clf.best_params_
# best_knn = KNeighborsClassifier(n_neighbors=knn_clf.best_params_['n_neighbors'], weights=knn_clf.best_params_['weights'],
# algorithm=knn_clf.best_params_['algorithm'], leaf_size=knn_clf.best_params_['leaf_size'])
# best_knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_knn)
# metrics_dict.append({'name':'Best KNN', 'metrics':best_knn_me})
# nusvm = NuSVC()
# nusvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, nusvm)
# metrics_dict.append({'name':'nuSVM', 'metrics':nusvm_me})
# traindata = [data[0] for data in train]
# trainlabel = [data[1] for data in train]
# clfrNB.fit(traindata, trainlabel)
# print(test)
# cl = NaiveBayesClassifier(train)
# print(cl.classify("It is also possible to focus on non-compositional compounds, a key point in bilingual applications (CITATION; CITATION; Lin, 99)")) # "pos"
# print(cl.classify("I don't like their pizza.")) # "neg"
# for item in test:
# if(cl.classify(item[0]) == '1'):
# print(item, cl.classify(item[0]))
# print(cl.accuracy(test))
# print(cl.show_informative_features(100))
# print(train)
| mit |
weaver-viii/h2o-3 | py2/h2o_cmd.py | 20 | 16497 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
    # is it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
    # is it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
    # either return the first col, or the col identified by label. the column identified could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects due we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...bit e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
                compareActual = mn, None, None, None, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
            assert self.missingList == expectedMissingList, "%s %s" % (self.missingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
        if expectedLabel is not None:
            assert self.label == expectedLabel, "%s %s" % (self.label, expectedLabel)
        if expectedType is not None:
            assert self.type == expectedType, "%s %s" % (self.type, expectedType)
        if expectedMissing is not None:
            assert self.missing == expectedMissing, "%s %s" % (self.missing, expectedMissing)
        if expectedDomain is not None:
            assert self.domain == expectedDomain, "%s %s" % (self.domain, expectedDomain)
        if expectedBinsSum is not None:
            assert self.binsSum == expectedBinsSum, "%s %s" % (self.binsSum, expectedBinsSum)
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
        # we need both colIndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
| apache-2.0 |
kabrapratik28/Stanford_courses | cs224n/assignment1/q4_sentiment.py | 1 | 8150 | #!/usr/bin/env python
import argparse
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import itertools
from utils.treebank import StanfordSentiment
import utils.glove as glove
from q3_sgd import load_saved_params, sgd
# We will use sklearn here because it will run faster than implementing
# ourselves. However, for other parts of this assignment you must implement
# the functions yourself!
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
def getArguments():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--pretrained", dest="pretrained", action="store_true",
help="Use pretrained GloVe vectors.")
group.add_argument("--yourvectors", dest="yourvectors", action="store_true",
help="Use your vectors from q3.")
return parser.parse_args()
def getSentenceFeatures(tokens, wordVectors, sentence):
"""
Obtain the sentence feature for sentiment analysis by averaging its
word vectors
"""
# Implement computation for the sentence features given a sentence.
# Inputs:
# tokens -- a dictionary that maps words to their indices in
# the word vector list
# wordVectors -- word vectors (each row) for all tokens
# sentence -- a list of words in the sentence of interest
# Output:
# - sentVector: feature vector for the sentence
sentVector = np.zeros((wordVectors.shape[1],))
### YOUR CODE HERE
for word in sentence:
index = tokens[word]
sentVector += wordVectors[index]
sentVector /= len(sentence)
### END YOUR CODE
assert sentVector.shape == (wordVectors.shape[1],)
return sentVector
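# For example, for sentence = ["good", "movie"] the returned feature is
# (wordVectors[tokens["good"]] + wordVectors[tokens["movie"]]) / 2.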
def getRegularizationValues():
"""Try different regularizations
Return a sorted list of values to try.
"""
values = None # Assign a list of floats in the block below
### YOUR CODE HERE
values = [100, 10, 1, 0, 1e-1, 5e-1, 1e-2, 5e-2,
1e-3, 5e-3, 1e-4, 5e-4, 1e-5, 5e-5, 1e-6]
### END YOUR CODE
return sorted(values)
def chooseBestModel(results):
"""Choose the best model based on parameter tuning on the dev set
Arguments:
results -- A list of python dictionaries of the following format:
{
"reg": regularization,
"clf": classifier,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy
}
Returns:
Your chosen result dictionary.
"""
bestResult = None
### YOUR CODE HERE
currBestValue = -1.0
for each_result in results:
if each_result["dev"] > currBestValue:
currBestValue = each_result["dev"]
bestResult = each_result
### END YOUR CODE
return bestResult
def accuracy(y, yhat):
""" Precision for classifier """
assert(y.shape == yhat.shape)
return np.sum(y == yhat) * 100.0 / y.size
def plotRegVsAccuracy(regValues, results, filename):
""" Make a plot of regularization vs accuracy """
plt.plot(regValues, [x["train"] for x in results])
plt.plot(regValues, [x["dev"] for x in results])
plt.xscale('log')
plt.xlabel("regularization")
plt.ylabel("accuracy")
plt.legend(['train', 'dev'], loc='upper left')
plt.savefig(filename)
def outputConfusionMatrix(features, labels, clf, filename):
""" Generate a confusion matrix """
pred = clf.predict(features)
cm = confusion_matrix(labels, pred, labels=range(5))
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Reds)
plt.colorbar()
classes = ["- -", "-", "neut", "+", "+ +"]
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(filename)
def outputPredictions(dataset, features, labels, clf, filename):
""" Write the predictions to file """
pred = clf.predict(features)
with open(filename, "w") as f:
print >> f, "True\tPredicted\tText"
for i in xrange(len(dataset)):
print >> f, "%d\t%d\t%s" % (
labels[i], pred[i], " ".join(dataset[i][0]))
def main(args):
""" Train a model to do sentiment analyis"""
# Load the dataset
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
if args.yourvectors:
_, wordVectors, _ = load_saved_params()
wordVectors = np.concatenate(
(wordVectors[:nWords,:], wordVectors[nWords:,:]),
axis=1)
elif args.pretrained:
wordVectors = glove.loadWordVectors(tokens)
dimVectors = wordVectors.shape[1]
# Load the train set
trainset = dataset.getTrainSentences()
nTrain = len(trainset)
trainFeatures = np.zeros((nTrain, dimVectors))
trainLabels = np.zeros((nTrain,), dtype=np.int32)
for i in xrange(nTrain):
words, trainLabels[i] = trainset[i]
trainFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare dev set features
devset = dataset.getDevSentences()
nDev = len(devset)
devFeatures = np.zeros((nDev, dimVectors))
devLabels = np.zeros((nDev,), dtype=np.int32)
for i in xrange(nDev):
words, devLabels[i] = devset[i]
devFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# Prepare test set features
testset = dataset.getTestSentences()
nTest = len(testset)
testFeatures = np.zeros((nTest, dimVectors))
testLabels = np.zeros((nTest,), dtype=np.int32)
for i in xrange(nTest):
words, testLabels[i] = testset[i]
testFeatures[i, :] = getSentenceFeatures(tokens, wordVectors, words)
# We will save our results from each run
results = []
regValues = getRegularizationValues()
for reg in regValues:
print "Training for reg=%f" % reg
# Note: add a very small number to regularization to please the library
clf = LogisticRegression(C=1.0/(reg + 1e-12))
clf.fit(trainFeatures, trainLabels)
# Test on train set
pred = clf.predict(trainFeatures)
trainAccuracy = accuracy(trainLabels, pred)
print "Train accuracy (%%): %f" % trainAccuracy
# Test on dev set
pred = clf.predict(devFeatures)
devAccuracy = accuracy(devLabels, pred)
print "Dev accuracy (%%): %f" % devAccuracy
# Test on test set
# Note: always running on test is poor style. Typically, you should
# do this only after validation.
pred = clf.predict(testFeatures)
testAccuracy = accuracy(testLabels, pred)
print "Test accuracy (%%): %f" % testAccuracy
results.append({
"reg": reg,
"clf": clf,
"train": trainAccuracy,
"dev": devAccuracy,
"test": testAccuracy})
# Print the accuracies
print ""
print "=== Recap ==="
print "Reg\t\tTrain\tDev\tTest"
for result in results:
print "%.2E\t%.3f\t%.3f\t%.3f" % (
result["reg"],
result["train"],
result["dev"],
result["test"])
print ""
bestResult = chooseBestModel(results)
print "Best regularization value: %0.2E" % bestResult["reg"]
print "Test accuracy (%%): %f" % bestResult["test"]
# do some error analysis
if args.pretrained:
plotRegVsAccuracy(regValues, results, "q4_reg_v_acc.png")
outputConfusionMatrix(devFeatures, devLabels, bestResult["clf"],
"q4_dev_conf.png")
outputPredictions(devset, devFeatures, devLabels, bestResult["clf"],
"q4_dev_pred.txt")
if __name__ == "__main__":
main(getArguments())
| apache-2.0 |
abhishekgahlot/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
dimmddr/roadSignsNN | prepare_images.py | 1 | 8513 | import cv2
import matplotlib.pyplot as plt
import numpy as np
from numpy.lib.stride_tricks import as_strided
import nn
from settings import COVER_PERCENT
IMG_WIDTH = 1025
IMG_HEIGHT = 523
IMG_LAYERS = 3
SUB_IMG_WIDTH = 48
SUB_IMG_HEIGHT = 48
SUB_IMG_LAYERS = 3
WIDTH = 2
HEIGHT = 1
LAYERS = 0
XMIN = 0
YMIN = 1
XMAX = 2
YMAX = 3
# TODO: rewrite everything either with the Rectangle namedtuple or with numpy, e.g. using a recarray
def compute_covering(window, label):
dx = min(window.xmax, label.xmax) - max(window.xmin, label.xmin)
dy = min(window.ymax, label.ymax) - max(window.ymin, label.ymin)
if (dx >= 0) and (dy >= 0):
label_cover = dx * dy / ((label.xmax - label.xmin) * (label.ymax - label.ymin))
window_cover = dx * dy / ((window.xmax - window.xmin) * (window.ymax - window.ymin))
return max(label_cover, window_cover)
else:
return 0
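# Illustrative sketch (not used anywhere in this module): a hand-worked example
# of compute_covering with made-up rectangles. With a 48x48 window at the origin
# and a 48x48 label shifted by 24 px in both directions, the intersection is
# 24 * 24 = 576 px and both areas are 48 * 48 = 2304 px, so both ratios - and
# the returned covering - are 0.25.
def _compute_covering_example():
    window = nn.Rectangle(0, 0, 48, 48)
    label = nn.Rectangle(24, 24, 72, 72)
    return compute_covering(window, label)  # 0.25 for these numbers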
def split_into_subimgs(img, sub_img_shape, debug, step=1):
shape = (int(np.floor((img.shape[HEIGHT] - sub_img_shape[HEIGHT]) / step)),
int(np.floor((img.shape[WIDTH] - sub_img_shape[WIDTH]) / step)),
SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
# shape = (lbl_array.shape[0], SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH)
result_array = as_strided(img, shape=shape,
strides=(
img.strides[1] * step + (img.shape[WIDTH] - sub_img_shape[WIDTH]) % step *
img.strides[2],
img.strides[2] * step,
img.strides[0], img.strides[1], img.strides[2]))
return result_array
def get_labels(labels, result_array_shape, step, sub_img_shape):
lbl_array = np.zeros(shape=(result_array_shape[0], result_array_shape[1]))
index = 0
for i in range(lbl_array.shape[0]):
for ii in range(lbl_array.shape[1]):
# Rectangle = namedtuple('Rectangle', ['xmin', 'ymin', 'xmax', 'ymax'])
window = nn.Rectangle(ii * step, i * step, ii * step + sub_img_shape[HEIGHT],
i * step + sub_img_shape[WIDTH])
cover = np.array([compute_covering(window=window,
label=nn.Rectangle(lbl[0], lbl[1], lbl[2], lbl[3])) for lbl in labels])
is_cover = int(np.any(cover > COVER_PERCENT))
lbl_array[i, ii] = is_cover
index += 1
return lbl_array
def prepare(img_path, labels, debug=False):
step = 2
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if debug:
print("Prepare image " + img_path)
print(img.shape)
print(labels)
res_img = img / 255
res_img = np.array([res_img[:, :, 0], res_img[:, :, 1], res_img[:, :, 2]])
res = split_into_subimgs(res_img, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH),
step=step, debug=debug)
lbl_res = get_labels(labels=labels, result_array_shape=res.shape,
step=step, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH))
return res, lbl_res
def prepare_calibration(img_path, labels, debug=False):
    # Returns labels in the form (yn, xn, wn, hn) for calibrating the image bounding box:
    # if (x, y) are the coordinates of the top-left corner and (w, h) are the width and height,
    # the new box is (x - xn * w / wn, y - yn * h / hn), (w / wn, h / hn)
"""
:param img_path:
:param labels:
:param debug:
:return:
@note: Первая сетка должна преобразовывать изображение в пределах [16, 64], вторая в [8, 128]
Так как изначально окно 32х32, то максимальное значение корректировки должно быть 2, минимально 0.5.
Делать по три класса на ширину и высоту удобно, но вряд ли практично. Стоит попробовать сделать хотя бы по 5.
Делать удобно нечетное количество, чтобы были доступны три варианта: максимум, минимум и оставить как есть.
Варианты множителей получаются: [1/2, 3/4, 1, 6/4, 2]
соответсвенно размеры для изначального варианта 32: [16, 24, 32, 48, 64]
"""
step = 2
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
if debug:
print("Prepare image " + img_path)
print(img.shape)
print(labels)
res_img = img / 255
res_img = np.array([res_img[:, :, 0], res_img[:, :, 1], res_img[:, :, 2]])
res = split_into_subimgs(res_img, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH),
step=step, debug=debug)
lbl_res = get_labels(labels=labels, result_array_shape=res.shape,
step=step, sub_img_shape=(SUB_IMG_LAYERS, SUB_IMG_HEIGHT, SUB_IMG_WIDTH))
    # TODO: decide what to do when there are several signs -
    # we need some way to get the coordinates of the right one
xmin, ymin, xmax, ymax = labels[0]
for image in res[lbl_res == 1]:
pass
    # From the array of images and the array of their labels we need to extract the image coordinates.
    # Width and height are known and identical during the initial preparation.
    # The coordinates can be derived from the image index; we only need to extract that index.
return res, lbl_res
def show_sign(img_path, lbl):
print(img_path)
print(lbl)
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
cv2.imshow("img", img[lbl[1]:lbl[3], lbl[0]:lbl[2], :])
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.rectangle(img, (lbl[0], lbl[1]), (lbl[2], lbl[3]), 2)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_roi(roi_list):
for roi in roi_list:
(r, g, b) = (roi[0], roi[1], roi[2])
roi = cv2.merge((r, g, b))
cv2.imshow("img", roi)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show_rectangles(filename, rectangles_list, show_type='matplotlib'):
img = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
for rect in rectangles_list:
if rect is not None:
cv2.rectangle(img, (rect[XMIN], rect[YMIN]), (rect[XMAX], rect[YMAX]), (0, 255, 0), 1)
if show_type == 'matplotlib':
(b, g, r) = cv2.split(img)
img = cv2.merge((r, g, b))
plt.imshow(img)
plt.show()
else:
cv2.imshow(filename, img)
cv2.waitKey()
# TODO: add saving to a separate directory
def save_img_with_rectangles(dataset_path, filename, rectangles_list):
img = cv2.imread(dataset_path + filename, cv2.IMREAD_UNCHANGED)
for rect in rectangles_list:
if rect is not None:
cv2.rectangle(img, (rect[XMIN], rect[YMIN]), (rect[XMAX], rect[YMAX]), (0, 255, 0), 1)
cv2.imwrite(dataset_path + "results/" + filename + "_with_rects.jpg", img)
# Probably temp function before I fix localization
def get_roi_from_images(images, img_path):
res_roi = []
res_label = []
label_dict = dict()
for image in images:
img = cv2.imread(img_path + image.filename.decode('utf8'), cv2.IMREAD_UNCHANGED)
for sign in image.signs:
if sign.label not in label_dict:
label_dict[sign.label] = len(label_dict)
(x1, y1, x2, y2) = sign.coord
roi = img[y1:y2, x1:x2, :]
res_roi.append(np.array([roi[:, :, 0], roi[:, :, 1], roi[:, :, 2]]))
res_label.append(label_dict[sign.label])
return res_roi, res_label, label_dict
def create_synthetic_data(imgs):
# Create array of size mods [1, 4], step = 0.5
sizes = np.arange(start=1, stop=4.5, step=0.5)
total = imgs.shape[0] * sizes.shape[0] * 2 # *2
res = []
return imgs
| mit |
heli522/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
TinyOS-Camp/DDEA-DEV | Archive/[14_10_03] Data_Collection_Sample/DB access sample code/vtt/sampling_density_VTT.py | 1 | 6262 | import os
import sys
import json
from datetime import datetime
import time
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pylab as pl
import pickle
######
### Configurations
######
UUID_FILE = 'finland_ids.csv'
#DATA_FOLDER = 'VTT_week/'
DATA_FOLDER = 'data_year/'
DATA_EXT = '.csv'
SCRIPT_DIR = os.path.dirname(__file__)
def saveObjectBinary(obj, filename):
with open(filename, "wb") as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def loadObjectBinary(filename):
with open(filename, "rb") as input:
obj = pickle.load(input)
return obj
def group_uuids(uuid_list):
sensors_metadata = []
for uuid in uuid_list:
metadata_filepath = os.path.join(SCRIPT_DIR, 'metadata/meta_' + uuid + '.dat')
### open metadata file ###
with open(str(metadata_filepath)) as f:
#metadata = f.read().strip()
#sensors_metadata.append(metadata)
sensor_metadata = json.load(f)
sensors_metadata.append((uuid, sensor_metadata[0]['Path']))
sensors_metadata.sort(key=lambda tup: tup[1])
#print sensors_metadata
return sensors_metadata
### delta_t in ms ; max_sr in ms ###
### start_time = "2013/11/01-00:00:00"
### end_time = "2013/11/07-23:59:59"
def load_uuid_list():
uuid_list = []
uuid_filepath = os.path.join(SCRIPT_DIR, UUID_FILE)
temp_uuid_list = open(uuid_filepath).readlines()
for line in temp_uuid_list:
tokens = line.strip().split(',')
if len(tokens) == 0:
continue
uuid_list.append(tokens[0].strip())
return uuid_list
def print_readings(uuid):
sensor_filepath = os.path.join(SCRIPT_DIR, 'readings/' + uuid + '.dat')
sensors_readings = []
with open(str(sensor_filepath)) as f:
# sensors_metadata.append(f.read())
json_readings = json.load(f)
sensors_readings = json_readings[0]['Readings']
if len(sensors_readings) == 0:
return
for pair in sensors_readings:
if pair[1] is None:
continue
ts = pair[0]
readable_ts = datetime.fromtimestamp(int(ts) / 1000).strftime('%Y-%m-%d %H:%M:%S')
reading = pair[1]
print str(ts), str(readable_ts), reading
def compute_sampling_density(uuid, start_time, end_time, delta_t, max_sr):
### for testing ###
#start_time = "2013/11/01-00:00:00"
#end_time = "2013/11/07-23:59:59"
start_ts = int(time.mktime(datetime.strptime(start_time, "%Y/%m/%d-%H:%M:%S").timetuple()) * 1000)
end_ts = int(time.mktime(datetime.strptime(end_time, "%Y/%m/%d-%H:%M:%S").timetuple()) * 1000)
if (end_ts - start_ts) * 1.0 / delta_t == int ( math.floor((end_ts - start_ts) / delta_t)):
num_intervals = int ( (end_ts - start_ts) / delta_t) + 1
else:
num_intervals = int(math.ceil((end_ts - start_ts) * 1.0 / delta_t))
sampling_density = [0] * num_intervals
###### open reading of uuid - BERKELEY SDH BUILDING ######
# sensor_filepath = os.path.join(SCRIPT_DIR, 'readings/' + uuid + '.dat')
# with open(str(sensor_filepath)) as f:
# # sensors_metadata.append(f.read())
# json_readings = json.load(f)
# sensors_readings = json_readings[0]['Readings']
# if len(sensors_readings) == 0:
# return sampling_density
###### open reading of uuid - VTT FINLAND ######
sensor_filepath = os.path.join(SCRIPT_DIR, DATA_FOLDER + uuid + DATA_EXT)
lines = open(str(sensor_filepath)).readlines()
sensors_readings = []
for line in lines:
pair = []
if line == "":
continue
tokens = line.strip().split(',')
if len(tokens) < 2:
continue
#[curr_date, curr_time] = tokens[0].split(' ')
#print curr_date.strip() + '-' + curr_time.strip()
ts = int(time.mktime(datetime.strptime(tokens[0].strip(), "%Y-%m-%d %H:%M:%S").timetuple()) * 1000)
reading = float(tokens[1].strip())
pair.append(ts)
pair.append(reading)
#print tokens[0].strip(), str(ts), str(reading)
# sensors_metadata.append(f.read())
###for pair in sensors_readings:
curr_ts = int(pair[0])
#reading = float(pair[1])
if curr_ts < start_ts:
continue
if curr_ts > end_ts:
break
if pair[1] is None:
continue
curr_reading_index = int( (curr_ts - start_ts) / delta_t)
sampling_density[curr_reading_index] = sampling_density[curr_reading_index] + 1
### compute density
max_num_samples = delta_t / max_sr
for i in range(0, num_intervals):
sampling_density[i] = sampling_density[i] * 1.0 / max_num_samples
return sampling_density
def compute_sampling_density_matrix(start_time, end_time, delta_t, max_sr):
uuid_list = load_uuid_list()
uuid_list = uuid_list[0:1000]
sampling_density_matrix = []
for uuid in uuid_list:
sampling_density = compute_sampling_density(uuid, start_time, end_time, delta_t, max_sr)
if len(sampling_density) == 0:
continue
sampling_density_matrix.append(sampling_density)
return sampling_density_matrix
def visualize_density_matrix(sampling_density_matrix):
plt.imshow(sampling_density_matrix, interpolation="nearest", cmap=pl.cm.spectral)
pl.savefig('density.png', bbox_inches='tight')
######
### Example
######
#uuid = "GW1.HA1_AS_TE_AH_FM"
start_time = "2013/11/01-00:00:00"
end_time = "2013/11/07-23:59:59"
max_sr = 300000 ### 1000 ms = 1s, 5mins
delta_t = 1200000 ### ms ; 20 mins
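# With these defaults each interval can hold at most delta_t / max_sr =
# 1200000 / 300000 = 4 samples, so every density value is sample_count / 4.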
sys_argv = sys.argv
if len(sys_argv) == 5:
start_time = sys_argv[1]
end_time = sys_argv[2]
delta_t = int(sys_argv[3])
max_sr = int(sys_argv[4])
### compute sampling density matrix and visualize
sampling_density_matrix = np.asarray(compute_sampling_density_matrix(start_time, end_time, delta_t, max_sr))
visualize_density_matrix(sampling_density_matrix)
| gpl-2.0 |
JackKelly/neuralnilm_prototype | scripts/e127.py | 2 | 4534 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
501: {
'remove_from': -3,
'new_layers':
[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
kingsfordgroup/armatus | scripts/HiCvis.py | 1 | 7843 | #!/usr/env python
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import argparse
import math
from scipy.sparse import coo_matrix
def plotall(datamat,domains1,domains2,bounds,legendname1,legendname2,outputname):
""" Show heatmap of Hi-C data along with any domain sets given
:param datamat: Hi-C data matrix as numpy array
:param domains1: nx2 list of domains (optional, use [] to just see heatmap of Hi-C matrix)
:param domains2: nx2 list of domains (optional, use [] if no second set of domains)
:param bounds: (x,y) to view only bins between x and y (optional, use () to see entire chromosome)
:param legendname1: legend label for first set of domains
:param legendname2: legend label for second set of domains
:param outputname: filename of image to be saved (optional - use [] to view instead of save)
:return: either show image (if outputname == []) or save image as outputname
"""
if bounds == (): # plot full Hi-C matrix with all TADs
logdata = np.ma.log(datamat)
logdata = logdata.filled(0)
labelspacing = int(math.floor(round(len(logdata),-int(math.floor(math.log10(len(logdata)))))/10))
ax = sb.heatmap(logdata,cbar=False,xticklabels=labelspacing,yticklabels=labelspacing)
if domains1 != []:
for interval in domains1: # plot outline of each domain
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[0]+1),'g')
plt.plot((interval[1],interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'g')
dom1Artist = plt.Line2D((0,1),(0,0), color='green', linestyle='solid')
if domains2 != []:
for interval in domains2:
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[1],len(logdata)-interval[1]),'b')
plt.plot((interval[0]-1,interval[0]-1),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'b')
dom2Artist = plt.Line2D((0,1),(0,0), color='blue', linestyle='solid')
else: # show only range of matrix between bounds
logdata = np.ma.log(datamat[bounds[0]:bounds[1],bounds[0]:bounds[1]])
logdata = logdata.filled(0)
labelspacing = int(math.floor(round(len(logdata),-int(math.floor(math.log10(len(logdata)))))/10))
ax = sb.heatmap(logdata,cbar=False,xticklabels=labelspacing,yticklabels=labelspacing)
if domains1 != []:
for interval in domains1:
if interval[0] >= bounds[0] and interval[1] <= bounds[1]:
interval -= bounds[0]
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[0]+1),'g')
plt.plot((interval[1],interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'g')
dom1Artist = plt.Line2D((0,1),(0,0), color='green', linestyle='solid')
if domains2 != []:
for interval in domains2:
if interval[0] >= bounds[0] and interval[1] <= bounds[1]:
interval -= bounds[0]
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[1],len(logdata)-interval[1]),'b')
plt.plot((interval[0]-1,interval[0]-1),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'b')
dom2Artist = plt.Line2D((0,1),(0,0), color='blue', linestyle='solid')
if legendname1 and legendname2:
legend = ax.legend([dom1Artist,dom2Artist], [legendname1, legendname2],frameon = 1)
legendframe = legend.get_frame()
legendframe.set_facecolor('white')
legendframe.set_edgecolor('black')
elif legendname1:
legend = ax.legend([dom1Artist],[legendname1])
legendframe = legend.get_frame()
legendframe.set_facecolor('white')
legendframe.set_edgecolor('black')
# save image to file if filename was given, .png is default if no extension given
if outputname:
plt.savefig(outputname)
else: # just display image
plt.show()
def parseRaoFormat(datamat,res):
""" turn sparse Rao data format into dense matrix for heatmap
:param datamat: Hi-C data in sparse format as numpy array (n x 3)
:param res: resolution of data
:return: dense Hi-C data matrix
"""
datamat[:,0:2] = datamat[:,0:2]/res
datamat = coo_matrix((datamat[:,2], (datamat[:,0],datamat[:,1]) ))
datamat = datamat.todense()
if datamat.shape[0] > datamat.shape[1]:
# add column(s) of zeros to make square matrix
ncols = datamat.shape[0] - datamat.shape[1]
sqmat = np.zeros((datamat.shape[0],datamat.shape[0]))
sqmat[:,:-1*ncols] = datamat
datamat = sqmat
elif datamat.shape[1] > datamat.shape[0]:
# add row(s) of zeros to make square matrix
nrows = datamat.shape[1] - datamat.shape[0]
sqmat = np.zeros((datamat.shape[1],datamat.shape[1]))
sqmat[:-1*nrows,:] = datamat
datamat = sqmat
datamat = datamat + np.transpose(datamat) - np.diagonal(datamat)*np.identity(len(datamat))
return datamat
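# Illustrative sketch (not called by this script): a tiny made-up Rao-style
# input for parseRaoFormat. Each row is (bin_start_bp, bin_start_bp, count) at
# 10 kb resolution; the function rescales the first two columns by the
# resolution and symmetrises the dense matrix, so this toy input is intended
# to come out as [[5, 2], [2, 7]].
def _parse_rao_format_example():
    toy = np.array([[0., 0., 5.],
                    [0., 10000., 2.],
                    [10000., 10000., 7.]])
    return parseRaoFormat(toy, 10000)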
def main(datafile, res, domainfile1, domainfile2, domainres1, domainres2, windowbounds, legendname1, legendname2, outputname):
datamat = np.genfromtxt(datafile,delimiter='\t')
if datamat.shape[1] == 3: # Rao format
datamat = parseRaoFormat(datamat, res)
else: # remove any NaNs
datamat = datamat[~np.isnan(datamat)]
        datamat = np.reshape(datamat, (int(np.sqrt(len(datamat))), int(np.sqrt(len(datamat)))))
if domainfile1:
domains1 = np.genfromtxt(domainfile1,delimiter='\t')
domains1 = domains1[~np.isnan(domains1)]/domainres1
domains1 = np.reshape(domains1,(len(domains1)/2,2))
else:
domains1 = []
if domainfile2:
domains2 = np.genfromtxt(domainfile2,delimiter='\t')
domains2 = domains2[~np.isnan(domains2)]/domainres2
domains2 = np.reshape(domains2,(len(domains2)/2,2))
else:
domains2 = []
if windowbounds:
bounds = (int(windowbounds[0]),int(windowbounds[1]))
else:
bounds = ()
if not legendname1: # make filenames legend entry, if none is given
legendname1 = domainfile1
if not legendname2:
legendname2 = domainfile2
plotall(datamat,domains1,domains2,bounds,legendname1,legendname2,outputname)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Domain visualization tool for Hi-C data.')
parser.add_argument('-i', metavar='inputFile', help='raw Hi-C data filename (tab-delimited text file of Hi-C data or Rao format)')
parser.add_argument('-r', metavar='Resolution', default = [], type=int, help='Hi-C Resolution (only needed if using Rao data format)')
parser.add_argument('-b', metavar=('startBound','endBound'), nargs=2, default=(), help='Bounds for viewing window (optional)')
parser.add_argument('-d1', metavar='domainFile1', default=[], help='TAD file')
parser.add_argument('-d2', metavar='domainFile2', default=[], help='second TAD file (optional)')
parser.add_argument('-dr1', metavar='domainResolution1', type=int, default=1, help='Resolution of domains in domainFile1')
parser.add_argument('-dr2', metavar='domainResolution2', type=int, default=1, help='Resolution of domains in domainFile2')
parser.add_argument('-l1', metavar='legendName1', default=[], type=str, help='Legend name for first set of domains')
parser.add_argument('-l2', metavar='legendName2', default=[], type=str, help='Legend name for second set of domains')
parser.add_argument('-o', metavar='outputFile', default=[], type=str, help='Filename for saved image file')
args = parser.parse_args()
main(args.i, args.r, args.d1, args.d2, args.dr1, args.dr2, args.b, args.l1, args.l2, args.o)
| bsd-2-clause |
sinhrks/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
kcompher/topik | topik/models.py | 1 | 2641 | from __future__ import absolute_import
import logging
import gensim
import pandas as pd
# imports used only for doctests
from topik.readers import read_input
from topik.tests import test_data_path
from topik.preprocessing import preprocess
class LDA(object):
"""A high interface for an LDA (Latent Dirichlet Allocation) model.
Parameters
----------
corpus_file: string
Location of the corpus serialized in Matrix Market format
dict_file: string
Location of the dictionary
>>> raw_data = read_input(
'{}/test_data_json_stream.json'.format(test_data_path),
content_field="abstract")
>>> processed_data = preprocess(raw_data)
>>> my_lda = LDA(processed_data)
"""
def __init__(self, corpus_file, dict_file, ntopics=10, **kwargs):
self.corpus = gensim.corpora.MmCorpus(corpus_file)
self.dictionary = gensim.corpora.Dictionary.load(dict_file)
self.model = gensim.models.LdaModel(self.corpus, num_topics=ntopics, id2word=self.dictionary, **kwargs)
def save(self, filename):
self.model.save(filename)
def get_top_words(self, topn):
top_words = [self.model.show_topic(topicno, topn) for topicno in range(self.model.num_topics)]
return top_words
def termite_data(self, filename="termite.csv", topn_words=15):
"""Generate the csv file input for the termite plot.
Parameters
----------
filename: string
Desired name for the generated csv file
>>> raw_data = read_input(
'{}/test_data_json_stream.json'.format(test_data_path),
content_field="text")
>>> processed_data = preprocess(raw_data)
>>> my_lda = LDA(processed_data)
>>> my_lda.termite_data('termite.csv', 15)
"""
logging.info("generating termite plot input from %s " % self.corpus)
top_words = self.get_top_words(topn_words)
count = 1
for topic in top_words:
if count == 1:
df_temp = pd.DataFrame(topic, columns=['weight', 'word'])
df_temp['topic'] = pd.Series(count, index=df_temp.index)
df = df_temp
else:
df_temp = pd.DataFrame(topic, columns=['weight', 'word'])
df_temp['topic'] = pd.Series(count, index=df_temp.index)
df = df.append(df_temp, ignore_index=True)
count += 1
logging.info("saving termite plot input csv file to %s " % filename)
df.to_csv(filename, index=False, encoding='utf-8')
return df
| bsd-3-clause |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/image_processing/skimage/skimage_transform.py | 2 | 1365 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
#---------------------------------------------------------------------
# REF [site] >> http://scikit-image.org/docs/stable/auto_examples/transform/plot_piecewise_affine.html
def piecewise_affine_transform():
image = data.astronaut()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 10)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# Add sinusoidal oscillation to row coordinates.
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
out_rows = image.shape[0] - 1.5 * 50
out_cols = cols
out = warp(image, tform, output_shape=(out_rows, out_cols))
fig, ax = plt.subplots()
ax.imshow(out)
ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
ax.axis((0, out_cols, out_rows, 0))
plt.show()
def main():
piecewise_affine_transform()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
AmurG/tardis | tardis/simulation.py | 11 | 2036 | import logging
import time
from pandas import HDFStore
import os
# Adding logging support
logger = logging.getLogger(__name__)
def run_radial1d(radial1d_model, history_fname=None):
if history_fname:
if os.path.exists(history_fname):
logger.warn('History file %s exists - it will be overwritten', history_fname)
os.system('rm %s' % history_fname)
history_buffer = HDFStore(history_fname)
radial1d_model.atom_data.lines.to_hdf(history_buffer, 'atom_data/lines')
radial1d_model.atom_data.levels.to_hdf(history_buffer, 'atom_data/levels')
start_time = time.time()
initialize_j_blues = True
initialize_nlte = True
update_radiation_field = False
while radial1d_model.iterations_remaining > 1:
logger.info('Remaining run %d', radial1d_model.iterations_remaining)
radial1d_model.simulate(update_radiation_field=update_radiation_field, enable_virtual=False, initialize_nlte=initialize_nlte,
initialize_j_blues=initialize_j_blues)
initialize_j_blues=False
initialize_nlte=False
update_radiation_field = True
if history_fname:
radial1d_model.to_hdf5(history_buffer, path='model%03d' % radial1d_model.iterations_executed, close_h5=False)
#Finished second to last loop running one more time
logger.info('Doing last run')
if radial1d_model.tardis_config.montecarlo.last_no_of_packets is not None:
radial1d_model.current_no_of_packets = radial1d_model.tardis_config.montecarlo.last_no_of_packets
radial1d_model.simulate(enable_virtual=True, update_radiation_field=update_radiation_field, initialize_nlte=initialize_nlte,
initialize_j_blues=initialize_j_blues)
if history_fname:
radial1d_model.to_hdf5(history_buffer, path='model%03d' % radial1d_model.iterations_executed)
logger.info("Finished in %d iterations and took %.2f s", radial1d_model.iterations_executed, time.time()-start_time)
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/graphics/tests/test_mosaicplot.py | 17 | 18878 | from __future__ import division
from statsmodels.compat.python import iterkeys, zip, lrange, iteritems, range
from numpy.testing import assert_, assert_raises, dec
from numpy.testing import run_module_suite
# utilities for the tests
from statsmodels.compat.collections import OrderedDict
from statsmodels.api import datasets
import numpy as np
from itertools import product
try:
import matplotlib.pyplot as pylab
have_matplotlib = True
except:
have_matplotlib = False
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# the main drawing function
from statsmodels.graphics.mosaicplot import mosaic
# other functions to be tested for accuracy
from statsmodels.graphics.mosaicplot import _hierarchical_split
from statsmodels.graphics.mosaicplot import _reduce_dict
from statsmodels.graphics.mosaicplot import _key_splitting
from statsmodels.graphics.mosaicplot import _normalize_split
from statsmodels.graphics.mosaicplot import _split_rect
@dec.skipif(not have_matplotlib or pandas_old)
def test_data_conversion():
# It will not reorder the elements
# so the dictionary will look odd
    # as its key order has the c and b
# keys swapped
import pandas
fig, ax = pylab.subplots(4, 4)
data = {'ax': 1, 'bx': 2, 'cx': 3}
mosaic(data, ax=ax[0, 0], title='basic dict', axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[0, 1], title='basic series', axes_label=False)
data = [1, 2, 3]
mosaic(data, ax=ax[0, 2], title='basic list', axes_label=False)
data = np.asarray(data)
mosaic(data, ax=ax[0, 3], title='basic array', axes_label=False)
data = {('ax', 'cx'): 1, ('bx', 'cx'): 2, ('ax', 'dx'): 3, ('bx', 'dx'): 4}
mosaic(data, ax=ax[1, 0], title='compound dict', axes_label=False)
mosaic(data, ax=ax[2, 0], title='inverted keys dict', index=[1, 0], axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[1, 1], title='compound series', axes_label=False)
mosaic(data, ax=ax[2, 1], title='inverted keys series', index=[1, 0])
data = [[1, 2], [3, 4]]
mosaic(data, ax=ax[1, 2], title='compound list', axes_label=False)
mosaic(data, ax=ax[2, 2], title='inverted keys list', index=[1, 0])
data = np.array([[1, 2], [3, 4]])
mosaic(data, ax=ax[1, 3], title='compound array', axes_label=False)
mosaic(data, ax=ax[2, 3], title='inverted keys array', index=[1, 0], axes_label=False)
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pandas.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['gender'], ax=ax[3, 0], title='dataframe by key 1', axes_label=False)
mosaic(data, ['pet'], ax=ax[3, 1], title='dataframe by key 2', axes_label=False)
mosaic(data, ['gender', 'pet'], ax=ax[3, 2], title='both keys', axes_label=False)
mosaic(data, ['pet', 'gender'], ax=ax[3, 3], title='keys inverted', axes_label=False)
pylab.suptitle('testing data conversion (plot 1 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_simple():
    # display a simple plot of 4 categories of data, split into four
# levels with increasing size for each group
# creation of the levels
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['healty', 'ill'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
# which colours should I use for the various categories?
# put it into a dict
props = {}
#males and females in blue and red
props[('male',)] = {'color': 'b'}
props[('female',)] = {'color': 'r'}
# all the groups corresponding to ill groups have a different color
for key in keys:
if 'ill' in key:
if 'male' in key:
props[key] = {'color': 'BlueViolet' , 'hatch': '+'}
else:
props[key] = {'color': 'Crimson' , 'hatch': '+'}
# mosaic of the data, with given gaps and colors
mosaic(data, gap=0.05, properties=props, axes_label=False)
    pylab.suptitle('synthetic data, 4 categories (plot 2 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic():
# make the same analysis on a known dataset
# load the data and clean it a bit
affairs = datasets.fair.load_pandas()
datas = affairs.exog
# any time greater than 0 is cheating
datas['cheated'] = affairs.endog > 0
# sort by the marriage quality and give meaningful name
# [rate_marriage, age, yrs_married, children,
# religious, educ, occupation, occupation_husb]
datas = datas.sort(['rate_marriage', 'religious'])
num_to_desc = {1: 'awful', 2: 'bad', 3: 'intermediate',
4: 'good', 5: 'wonderful'}
datas['rate_marriage'] = datas['rate_marriage'].map(num_to_desc)
num_to_faith = {1: 'non religious', 2: 'poorly religious', 3: 'religious',
4: 'very religious'}
datas['religious'] = datas['religious'].map(num_to_faith)
num_to_cheat = {False: 'faithful', True: 'cheated'}
datas['cheated'] = datas['cheated'].map(num_to_cheat)
# finished cleaning
fig, ax = pylab.subplots(2, 2)
mosaic(datas, ['rate_marriage', 'cheated'], ax=ax[0, 0],
title='by marriage happiness')
mosaic(datas, ['religious', 'cheated'], ax=ax[0, 1],
title='by religiosity')
mosaic(datas, ['rate_marriage', 'religious', 'cheated'], ax=ax[1, 0],
title='by both', labelizer=lambda k:'')
ax[1, 0].set_xlabel('marriage rating')
ax[1, 0].set_ylabel('religion status')
mosaic(datas, ['religious', 'rate_marriage'], ax=ax[1, 1],
title='inter-dependence', axes_label=False)
pylab.suptitle("extramarital affairs (plot 3 of 4)")
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_very_complex():
# make a scattermatrix of mosaic plots to show the correlations between
# each pair of variable in a dataset. Could be easily converted into a
# new function that does this automatically based on the type of data
key_name = ['gender', 'age', 'health', 'work']
key_base = (['male', 'female'], ['old', 'young'],
['healty', 'ill'], ['work', 'unemployed'])
keys = list(product(*key_base))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
props = {}
props[('male', 'old')] = {'color': 'r'}
props[('female',)] = {'color': 'pink'}
L = len(key_base)
fig, axes = pylab.subplots(L, L)
for i in range(L):
for j in range(L):
m = set(range(L)).difference(set((i, j)))
if i == j:
axes[i, i].text(0.5, 0.5, key_name[i],
ha='center', va='center')
axes[i, i].set_xticks([])
axes[i, i].set_xticklabels([])
axes[i, i].set_yticks([])
axes[i, i].set_yticklabels([])
else:
ji = max(i, j)
ij = min(i, j)
temp_data = OrderedDict([((k[ij], k[ji]) + tuple(k[r] for r in m), v)
for k, v in iteritems(data)])
keys = list(iterkeys(temp_data))
for k in keys:
value = _reduce_dict(temp_data, k[:2])
temp_data[k[:2]] = value
del temp_data[k]
mosaic(temp_data, ax=axes[i, j], axes_label=False,
properties=props, gap=0.05, horizontal=i > j)
pylab.suptitle('old males should look bright red, (plot 4 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_axes_labeling():
from numpy.random import rand
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['yes', 'no'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, rand(len(keys))))
lab = lambda k: ''.join(s[0] for s in k)
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(16, 8))
mosaic(data, ax=ax1, labelizer=lab, horizontal=True, label_rotation=45)
mosaic(data, ax=ax2, labelizer=lab, horizontal=False,
label_rotation=[0, 45, 90, 0])
#fig.tight_layout()
fig.suptitle("correct alignment of the axes labels")
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic_empty_cells():
# SMOKE test see #2286
import pandas as pd
mydata = pd.DataFrame({'id2': {64: 'Angelica',
65: 'DXW_UID', 66: 'casuid01',
67: 'casuid01', 68: 'EC93_uid',
69: 'EC93_uid', 70: 'EC93_uid',
60: 'DXW_UID', 61: 'AtmosFox',
62: 'DXW_UID', 63: 'DXW_UID'},
'id1': {64: 'TGP',
65: 'Retention01', 66: 'default',
67: 'default', 68: 'Musa_EC_9_3',
69: 'Musa_EC_9_3', 70: 'Musa_EC_9_3',
60: 'default', 61: 'default',
62: 'default', 63: 'default'}})
ct = pd.crosstab(mydata.id1, mydata.id2)
fig, vals = mosaic(ct.T.unstack())
fig, vals = mosaic(mydata, ['id1','id2'])
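# helper used by the geometry tests below: assert element-wise numerical closeness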
eq = lambda x, y: assert_(np.allclose(x, y))
def test_recursive_split():
keys = list(product('mf'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
    eq(res[('m',)], (0.0, 0.0, 0.5, 1.0))
    eq(res[('f',)], (0.5, 0.0, 0.5, 1.0))
keys = list(product('mf', 'yao'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
    eq(res[('m', 'y')], (0.0, 0.0, 0.5, 1 / 3))
    eq(res[('m', 'a')], (0.0, 1 / 3, 0.5, 1 / 3))
    eq(res[('m', 'o')], (0.0, 2 / 3, 0.5, 1 / 3))
    eq(res[('f', 'y')], (0.5, 0.0, 0.5, 1 / 3))
    eq(res[('f', 'a')], (0.5, 1 / 3, 0.5, 1 / 3))
    eq(res[('f', 'o')], (0.5, 2 / 3, 0.5, 1 / 3))
def test__reduce_dict():
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
eq(_reduce_dict(data, ('m',)), 4)
eq(_reduce_dict(data, ('m', 'o')), 2)
eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
eq(_reduce_dict(data, ('m',)), 6)
eq(_reduce_dict(data, ('m', 'o')), 1)
eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
def test__key_splitting():
# subdivide starting with an empty tuple
base_rect = {tuple(): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 1], tuple(), True, 0)
assert_(list(iterkeys(res)) == [('a',), ('b',)])
eq(res[('a',)], (0, 0, 0.5, 1))
eq(res[('b',)], (0.5, 0, 0.5, 1))
# subdivide a in two sublevel
res_bis = _key_splitting(res, ['c', 'd'], [1, 1], ('a',), False, 0)
assert_(list(iterkeys(res_bis)) == [('a', 'c'), ('a', 'd'), ('b',)])
eq(res_bis[('a', 'c')], (0.0, 0.0, 0.5, 0.5))
eq(res_bis[('a', 'd')], (0.0, 0.5, 0.5, 0.5))
eq(res_bis[('b',)], (0.5, 0, 0.5, 1))
    # starting with a non-empty tuple and an uneven distribution
base_rect = {('total',): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 2], ('total',), True, 0)
assert_(list(iterkeys(res)) == [('total',) + (e,) for e in ['a', 'b']])
eq(res[('total', 'a')], (0, 0, 1 / 3, 1))
eq(res[('total', 'b')], (1 / 3, 0, 2 / 3, 1))
def test_proportion_normalization():
# extremes should give the whole set, as well
# as if 0 is inserted
eq(_normalize_split(0.), [0.0, 0.0, 1.0])
eq(_normalize_split(1.), [0.0, 1.0, 1.0])
eq(_normalize_split(2.), [0.0, 1.0, 1.0])
# negative values should raise ValueError
assert_raises(ValueError, _normalize_split, -1)
assert_raises(ValueError, _normalize_split, [1., -1])
assert_raises(ValueError, _normalize_split, [1., -1, 0.])
# if everything is zero it will complain
assert_raises(ValueError, _normalize_split, [0.])
assert_raises(ValueError, _normalize_split, [0., 0.])
# one-element array should return the whole interval
eq(_normalize_split([0.5]), [0.0, 1.0])
eq(_normalize_split([1.]), [0.0, 1.0])
eq(_normalize_split([2.]), [0.0, 1.0])
# simple division should give two pieces
for x in [0.3, 0.5, 0.9]:
eq(_normalize_split(x), [0., x, 1.0])
# multiple division should split as the sum of the components
for x, y in [(0.25, 0.5), (0.1, 0.8), (10., 30.)]:
eq(_normalize_split([x, y]), [0., x / (x + y), 1.0])
for x, y, z in [(1., 1., 1.), (0.1, 0.5, 0.7), (10., 30., 40)]:
eq(_normalize_split(
[x, y, z]), [0., x / (x + y + z), (x + y) / (x + y + z), 1.0])
def test_false_split():
# if you ask it to be divided in only one piece, just return the original
# one
pure_square = [0., 0., 1., 1.]
conf_h = dict(proportion=[1], gap=0.0, horizontal=True)
conf_v = dict(proportion=[1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
conf_h = dict(proportion=[1], gap=0.5, horizontal=True)
conf_v = dict(proportion=[1], gap=0.5, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
# identity on a void rectangle should not give anything strange
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
conf = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
# splitting a negative rectangle should raise error
neg_square = [0., 0., -1., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
def test_rect_pure_split():
pure_square = [0., 0., 1., 1.]
# division in two equal pieces from the perfect square
h_2split = [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 0.5), (0.0, 0.5, 1.0, 0.5)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 2 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 2 / 3)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 1 / 3, 1.0), (2 / 3, 0.0,
1 / 3, 1.0)]
conf_h = dict(proportion=[1, 1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 1 / 3), (0.0, 2 / 3,
1.0, 1 / 3)]
conf_v = dict(proportion=[1, 1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 4, 1.0), (1 / 4, 0.0, 1 / 2, 1.0), (3 / 4, 0.0,
1 / 4, 1.0)]
conf_h = dict(proportion=[1, 2, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 4), (0.0, 1 / 4, 1.0, 1 / 2), (0.0, 3 / 4,
1.0, 1 / 4)]
conf_v = dict(proportion=[1, 2, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# splitting on a void rectangle should give multiple void
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
conf = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
def test_rect_deformed_split():
non_pure_square = [1., -1., 1., 0.5]
# division in two equal pieces from the perfect square
h_2split = [(1.0, -1.0, 0.5, 0.5), (1.5, -1.0, 0.5, 0.5)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 0.25), (1.0, -0.75, 1.0, 0.25)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(1.0, -1.0, 1 / 3, 0.5), (1 + 1 / 3, -1.0, 2 / 3, 0.5)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 1 / 6), (1.0, 1 / 6 - 1, 1.0, 2 / 6)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
def test_gap_split():
pure_square = [0., 0., 1., 1.]
# null split
conf_h = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), pure_square)
# equal split
h_2split = [(0.0, 0.0, 0.25, 1.0), (0.75, 0.0, 0.25, 1.0)]
conf_h = dict(proportion=[1, 1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
# disequal split
h_2split = [(0.0, 0.0, 1 / 6, 1.0), (0.5 + 1 / 6, 0.0, 1 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
def test_default_arg_index():
# 2116
import pandas as pd
df = pd.DataFrame({'size' : ['small', 'large', 'large', 'small', 'large',
'small'],
'length' : ['long', 'short', 'short', 'long', 'long',
'short']})
assert_raises(ValueError, mosaic, data=df, title='foobar')
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
CorySimon/pyIAST | test/python_scripts/Test IAST for Langmuir case.py | 2 | 7330 | # coding: utf-8
# # Test pyIAST for match with competitive Langmuir model
# In the case that the pure-component isotherms $N_{i,pure}(P)$ follow the Langmuir model with the same saturation loading $M$:
#
# $N_{i,pure} = M \frac{K_iP}{1+K_iP},$
#
# The mixed gas adsorption isotherm follows the competitive Langmuir isotherm:
#
# $N_i = M \frac{K_i p_i}{1 + \sum_j K_jp_j},$
#
# where $p_i$ is the partial pressure of component $i$. Here, we generate synthetic pure-component adsorption isotherm data and confirm that pyIAST agrees with the competitive Langmuir isotherm for 3 components.
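#
# As a quick sanity check of the formula (illustrative numbers, matching the
# parameters used below: $M = 1.0$, $K = (2, 10, 20)$), take partial pressures
# $p = (0.5, 0.3, 0.2)$ bar. Then for component 0:
#
# $N_0 = \frac{1.0 \cdot 2 \cdot 0.5}{1 + 2\cdot0.5 + 10\cdot0.3 + 20\cdot0.2} = \frac{1.0}{9.0} \approx 0.111$ mmol/g,
#
# and pyIAST should reproduce this value from the pure-component Langmuir fits.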
# In[1]:
from __future__ import absolute_import
import numpy as np
import pyiast
import pandas as pd
import matplotlib.pyplot as plt
from six.moves import range
plt.style.use('fivethirtyeight')
colors = ['b', 'g', 'r'] # for representing each component
component_names = {0: 'A', 1: 'B', 2: 'C'}
# ## Generate synthetic pure-component isotherm data, fit Langmuir models to them.
# Model parameters ($M$, $\{K_i\}$)
# In[2]:
M = 1.0
langmuirKs = [2.0, 10.0, 20.0] # K_i
# Generate data according to Langmuir model, store in list of Pandas DataFrames
# In[3]:
pressure = np.logspace(-3, np.log10(10), 20)
dfs = [
pd.DataFrame({
'P':
pressure,
'L':
M * langmuirKs[i] * pressure / (1.0 + langmuirKs[i] * pressure)
}) for i in range(3)
]
# Use pyIAST to fit Langmuir models to the data, then plot fits
# In[4]:
isotherms = [
pyiast.ModelIsotherm(
dfs[i], pressure_key='P', loading_key='L', model='Langmuir')
for i in range(3)
]
for i in range(len(isotherms)):
isotherms[i].print_params()
pyiast.plot_isotherm(isotherms[i])
# Plot synthetic data all in one plot for paper
# In[5]:
p_plot = np.logspace(-3, np.log10(11)) # for plotting
fig = plt.figure(facecolor='w')
for i in range(len(isotherms)):
plt.scatter(dfs[i]['P'], dfs[i]['L'], color=colors[i], s=50, label=None)
plt.plot(
p_plot,
M * langmuirKs[i] * p_plot / (1.0 + langmuirKs[i] * p_plot),
color=colors[i],
linewidth=2,
label=r'$N_%s(P) = \frac{%d P}{1+%dP}$' %
(component_names[i], langmuirKs[i], langmuirKs[i]))
plt.xlim([-.05 * 10, 1.05 * 10])
plt.ylim([-.05 * M, M * 1.05])
plt.xlabel('Pressure (bar)')
plt.ylabel('Gas uptake (mmol/g)')
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig(
'pure_component_Langmuir.png',
format='png',
dpi=300,
facecolor=fig.get_facecolor())
plt.show()
# ## Compare pyIAST predicted component loadings to that of competitive Langmuir
# Let us consider a tertiary mixture of components 0, 1, and 2 above at a total pressure of `total_pressure` bar.
# In[6]:
total_pressure = 1.0
# We will explore gas phase composition space ($\{y_i\}$) by generating random compositions and checking that they are within the triangle. We do not want to get too close to a pure phase boundary because of numerical instability, so we keep a distance `dx` away from pure phases. We will perform `num_tests` tests.
# In[15]:
dx = 0.0001
num_tests = 100
# Generate the compositions and store in list `compositions`
# In[16]:
compositions = []
test_no = 0
while test_no < num_tests:
    # generate random compositions
y1 = np.random.uniform(dx, 1.0 - dx)
y2 = np.random.uniform(dx, 1.0 - dx)
y3 = 1.0 - y2 - y1
# check that composition is within the triangle
if y3 < dx:
continue
# viable composition
compositions.append([y1, y2, y3])
# keep generating until we have num_tests
test_no += 1
# Next, we assert that pyIAST gives the same result as the competitive Langmuir isotherm for each of these compositions.
# Function to compute loading according to competitive Langmuir
# In[17]:
def competitive_langmuir_loading(partial_pressures, i):
"""
Calculate loading of component i according to competitive Langmuir
"""
return M * langmuirKs[i] * partial_pressures[i] / (
1.0 + np.dot(langmuirKs, partial_pressures))
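# For example (illustrative, same numbers as the sanity check above):
# competitive_langmuir_loading(np.array([0.5, 0.3, 0.2]), 0) returns 1.0 / 9.0 ~= 0.111.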
# Function to compute loading according to pyIAST
# In[10]:
def iast_loading(partial_pressures, i):
"""
Calculate loading of component i according to IAST
partial_pressures: Array, partial pressures of each component
i: component in the mixture
"""
component_loadings = pyiast.iast(partial_pressures, isotherms)
return component_loadings[i]
# Loop over compositions, assert pyIAST agrees with competitive Langmuir for each component. If this runs, then there is agreement!
# In[14]:
for i in range(num_tests):
partial_pressure = np.array(compositions[i]) * total_pressure
# for each component...
for c in range(len(langmuirKs)):
np.testing.assert_almost_equal(
competitive_langmuir_loading(partial_pressure, c),
iast_loading(partial_pressure, c),
decimal=4)
# ### This is using a custom library to plot the phase diagrams for the paper.
# Use ternary to plot phase diagram
# https://github.com/marcharper/python-ternary
# In[19]:
import ternary
scale = 10 # resolution in triangle
axis_colors = {'l': colors[1], 'r': colors[0], 'b': colors[2]}
cmaps = ["Blues", "Greens", "Reds"]
iast_or_lang = 'lang'
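# Set iast_or_lang to 'iast' to plot the IAST-predicted uptake instead of the
# analytic competitive-Langmuir uptake; the saved file names change accordingly.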
for c in range(3):
if iast_or_lang == 'lang':
f = lambda p: competitive_langmuir_loading(p, c)
else:
f = lambda p: iast_loading(p, c)
# loop over component
fig, ax = plt.subplots(facecolor='w')
ax.axis("off")
figure, tax = ternary.figure(ax=ax, scale=scale)
tax.heatmapf(
f,
boundary=False,
style="hexagonal",
cmap=plt.cm.get_cmap(cmaps[c]),
vmax=M,
vmin=0.0,
cbarlabel="%s uptake (mmol/g)" % component_names[c])
tax.boundary(linewidth=2.0, color_dict=axis_colors)
tax.left_axis_label("$p_1$ (bar)", color=axis_colors['l'], offset=0.16)
tax.right_axis_label("$p_0$ (bar)", color=axis_colors['r'], offset=0.16)
tax.bottom_axis_label("$p_2$ (bar)", color=axis_colors['b'], offset=-0.06)
tax.gridlines(
color="blue",
multiple=1,
linewidth=2,
horizontal_kwargs={'color': axis_colors['b']},
left_kwargs={'color': axis_colors['l']},
right_kwargs={'color': axis_colors['r']},
        alpha=0.7)  # gridline spacing is set by 'multiple' above (here every 1 unit); it can be a float
tax.ticks(
axis='rlb',
linewidth=1,
locations=np.arange(scale + 1),
clockwise=True,
color_dict=axis_colors,
ticks=["%.1f" % (1.0 - 1.0 * i / scale) for i in range(scale + 1)],
offset=0.03)
tax.clear_matplotlib_ticks()
tax._redraw_labels()
# if iast_or_lang == 'iast':
# tax.set_title("IAST uptake, component %d" % c, y=1.08, fontsize=14)
# if iast_or_lang == 'lang':
# tax.set_title("Competitive Langmuir uptake, component %d" % c, y=1.08, fontsize=14)
plt.tight_layout()
if iast_or_lang == 'iast':
plt.savefig(
"Tertiary_diagram_IAST_component_%d.png" % c,
format='png',
dpi=300,
facecolor=fig.get_facecolor())
if iast_or_lang == 'lang':
plt.savefig(
"Tertiary_diagram_Langmuir_component_%d.png" % c,
format='png',
dpi=300,
facecolor=fig.get_facecolor())
tax.show()
# In[ ]:
| mit |
pv/scikit-learn | sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
probml/pyprobml | scripts/svi_gmm_tfp_scratch.py | 1 | 7626 | # SVI for a GMM
# Modified from
# https://github.com/brendanhasz/svi-gaussian-mixture-model/blob/master/BayesianGaussianMixtureModel.ipynb
#pip install tf-nightly
#pip install --upgrade tfp-nightly -q
# Imports
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
from time import time
# Plot settings
#%config InlineBackend.figure_format = 'svg'
# Random seed
np.random.seed(12345)
tf.random.set_seed(12345)
# Generate some data
N = 3000
X = np.random.randn(N, 2).astype('float32')
X[:1000, :] += [2, 0]
X[1000:2000, :] -= [2, 4]
X[2000:, :] += [-2, 4]
# Plot the data
plt.plot(X[:, 0], X[:, 1], '.')
plt.axis('equal')
plt.show()
# Make a TensorFlow Dataset from that data
batch_size = 500
dataset = tf.data.Dataset.from_tensor_slices(
(X)).shuffle(10000).batch(batch_size)
class GaussianMixtureModel(tf.keras.Model):
"""A Bayesian Gaussian mixture model.
Assumes Gaussians' variances in each dimension are independent.
Parameters
----------
Nc : int > 0
Number of mixture components.
Nd : int > 0
Number of dimensions.
"""
def __init__(self, Nc, Nd):
# Initialize
super(GaussianMixtureModel, self).__init__()
self.Nc = Nc
self.Nd = Nd
# Variational distribution variables for means
self.locs = tf.Variable(tf.random.normal((Nc, Nd)))
self.scales = tf.Variable(tf.pow(tf.random.gamma((Nc, Nd), 5, 5), -0.5))
        # Variational distribution variables for the precisions (samples are converted to standard deviations as precision**-0.5)
self.alpha = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
self.beta = tf.Variable(tf.random.uniform((Nc, Nd), 4., 6.))
# Variational distribution variables for component weights
self.counts = tf.Variable(2*tf.ones((Nc,)))
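        # Together these define the mean-field variational posterior used in call():
        # q(mu) = Normal(locs, scales), q(precision) = Gamma(alpha, beta),
        # q(theta) = Dirichlet(counts).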
# Prior distributions for the means
self.mu_prior = tfd.Normal(tf.zeros((Nc, Nd)), tf.ones((Nc, Nd)))
# Prior distributions for the standard deviations
self.sigma_prior = tfd.Gamma(5*tf.ones((Nc, Nd)), 5*tf.ones((Nc, Nd)))
# Prior distributions for the component weights
self.theta_prior = tfd.Dirichlet(2*tf.ones((Nc,)))
def call(self, x, sampling=True, independent=True):
"""Compute losses given a batch of data.
Parameters
----------
x : tf.Tensor
A batch of data
sampling : bool
Whether to sample from the variational posterior
distributions (if True, the default), or just use the
mean of the variational distributions (if False).
Returns
-------
log_likelihoods : tf.Tensor
Log likelihood for each sample
kl_sum : tf.Tensor
Sum of the KL divergences between the variational
distributions and their priors
"""
# The variational distributions
mu = tfd.Normal(self.locs, self.scales)
sigma = tfd.Gamma(self.alpha, self.beta)
theta = tfd.Dirichlet(self.counts)
# Sample from the variational distributions
if sampling:
Nb = x.shape[0] #number of samples in the batch
mu_sample = mu.sample(Nb)
sigma_sample = tf.pow(sigma.sample(Nb), -0.5)
theta_sample = theta.sample(Nb)
else:
mu_sample = tf.reshape(mu.mean(), (1, self.Nc, self.Nd))
sigma_sample = tf.pow(tf.reshape(sigma.mean(), (1, self.Nc, self.Nd)), -0.5)
theta_sample = tf.reshape(theta.mean(), (1, self.Nc))
# The mixture density
density = tfd.Mixture(
cat=tfd.Categorical(probs=theta_sample),
components=[
tfd.MultivariateNormalDiag(loc=mu_sample[:, i, :],
scale_diag=sigma_sample[:, i, :])
for i in range(self.Nc)])
# Compute the mean log likelihood
log_likelihoods = density.log_prob(x)
# Compute the KL divergence sum
mu_div = tf.reduce_sum(tfd.kl_divergence(mu, self.mu_prior))
sigma_div = tf.reduce_sum(tfd.kl_divergence(sigma, self.sigma_prior))
theta_div = tf.reduce_sum(tfd.kl_divergence(theta, self.theta_prior))
kl_sum = mu_div + sigma_div + theta_div
# Return both losses
return log_likelihoods, kl_sum
def fit(self, data, nepochs):
optimizer = tf.keras.optimizers.Adam(lr=1e-3)
@tf.function
def train_step(data):
with tf.GradientTape() as tape:
log_likelihoods, kl_sum = self(data)
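                # Minibatch estimate of the negative ELBO per data point: the KL
                # term is divided by the full dataset size N, while the expected
                # log-likelihood is averaged over the batch via reduce_mean.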
elbo_loss = kl_sum/N - tf.reduce_mean(log_likelihoods)
gradients = tape.gradient(elbo_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for epoch in range(nepochs):
for data in dataset:
train_step(data)
def gmm_fit(model, data, nepochs):
optimizer = tf.keras.optimizers.Adam(lr=1e-3)
@tf.function
def train_step(data):
with tf.GradientTape() as tape:
log_likelihoods, kl_sum = model(data)
elbo_loss = kl_sum/N - tf.reduce_mean(log_likelihoods)
gradients = tape.gradient(elbo_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for epoch in range(nepochs):
for data in dataset:
train_step(data)
nepochs = 1000
nmix = 3
ndim = 2
model = GaussianMixtureModel(nmix, ndim)
### Fitting
time_start = time()
method = 3
if method == 1:
model.fit(dataset, nepochs)
if method == 2:
gmm_fit(model, dataset, nepochs)
if method == 3:
# Relies on 'model' and 'optimizer' being in scope = yuk!
model = GaussianMixtureModel(nmix, ndim)
optimizer = tf.keras.optimizers.Adam(lr=1e-3)
@tf.function
def train_step(data):
with tf.GradientTape() as tape:
log_likelihoods, kl_sum = model(data)
elbo_loss = kl_sum/N - tf.reduce_mean(log_likelihoods)
gradients = tape.gradient(elbo_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for epoch in range(nepochs):
for data in dataset:
train_step(data)
elapsed_time = (time() - time_start)
print('method {}'.format(method))
print(elapsed_time)
### Evaluation
# Compute log likelihood at each point on a grid
Np = 100 #number of grid points
Xp, Yp = np.meshgrid(np.linspace(-6, 6, Np), np.linspace(-6, 6, Np))
Pp = np.column_stack([Xp.flatten(), Yp.flatten()]).astype('float32')
Z, _ = model(Pp, sampling=False)
Z = np.reshape(Z, (Np, Np))
# Show the fit mixture density
plt.figure()
plt.imshow(np.exp(Z),
extent=(-6, 6, -6, 6),
origin='lower')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Likelihood')
model.locs
model.trainable_variables
# Sample from the std deviation variational posterior
stds = tf.pow(tfd.Gamma(model.alpha, model.beta).sample(10000), -0.5)
# Plot the samples
plt.figure()
sns.distplot(stds[:, 0, 0])
# Sample from the mean variational posterior
means = tfd.Normal(model.locs, model.scales).sample(10000)
# Plot the mean samples for a single
plt.figure()
sns.kdeplot(means[:, 0, 0].numpy(),
means[:, 0, 1].numpy(),
n_levels=10)
| mit |
alexmilesyounger/ds_basics | src/numpy_utils.py | 2 | 3188 | # coding: utf-8
# numpy_utils for Intro to Data Science with Python
# Author: Kat Chuang
# Created: Nov 2014
# --------------------------------------
import numpy
## Stage 2 begin
fieldNames = ['', 'id', 'priceLabel', 'name','brandId', 'brandName', 'imageLink',
'desc', 'vendor', 'patterned', 'material']
dataTypes = [('myint', 'i'), ('myid', 'i'), ('price', 'f8'), ('name', 'a200'),
('brandId', '<i8'), ('brandName', 'a200'), ('imageUrl', '|S500'),
('description', '|S900'), ('vendor', '|S100'), ('pattern', '|S50'), ('material', '|S50'), ]
def load_data(filename):
my_csv = numpy.genfromtxt(filename, delimiter='\t', skip_header=1,
names=fieldNames, invalid_raise=False,
dtype=dataTypes)
return my_csv
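# Example usage (hypothetical file name; expects a tab-separated file whose
# columns match fieldNames above):
#   my_csv = load_data('products.tsv')
#   size(my_csv)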
#2.a count
def size(my_csv):
print("Length (numpy): {}".format(my_csv.size))
#2.b sum
def calculate_numpy_sum(my_field):
field_in_float = [float(item) for item in my_field]
total = numpy.sum(field_in_float)
return total
#2.c mean
def find_numpy_average(my_field):
field_in_float = [float(item) for item in my_field]
total = calculate_numpy_sum(field_in_float)
size = len(my_field)
average = total / size
return average
#2.d max, min
def numpy_max(my_field_in_float):
return numpy.amax(my_field_in_float)
def numpy_min(my_field_in_float):
return numpy.amin(my_field_in_float)
## Stage 2 end
# --------------------------------------
## Stage 3 begin
from my_utils import filter_col_by_string, filter_col_by_float
## Stage 3 end
# --------------------------------------
## Stage 4 begin
from my_utils import write_to_file, write_brand_and_price_to_file
## Stage 4 end
# --------------------------------------
## Stage 5 begin
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_all_bars(prices_in_float, exported_figure_filename):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = list(map(int, prices_in_float))
X = numpy.arange(len(prices))
width = 0.25
ax.bar(X+width, prices, width)
ax.set_xlim([0, 5055])
fig.savefig(exported_figure_filename)
def create_chart_for_embed(sample, title):
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
plt.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
def export_chart(sample, title):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
ax.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
ax.set_title(title)
ax.set_xlabel(title)
ax.set_ylabel('Number of Ties')
if len(prices) > 20:
ax.set_xlim([0, round(len(prices), -1)])
else:
ax.set_xlim([0, len(prices)])
fig.savefig('_charts/' + title + '.png')
def prices_of_list(sampleData):
temp_list = []
for row in sampleData[1:]:
priceCol = float(row[2])
temp_list.append(priceCol)
return temp_list
## Stage 5 end
# --------------------------------------
## Stage 6 begin
## Stage 6 end
# --------------------------------------
| mit |
thesuperzapper/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 71 | 12923 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self.assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
creyesp/RF_Estimation | Clustering/clustering/gmm.py | 2 | 6063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gmm.py
#
# Copyright 2014 Carlos "casep" Sepulveda <casep@alumnos.inf.utfsm.cl>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Performs Gaussian Mixture Model clustering using scikit-learn
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import rfestimationLib as rfe
import argparse #argument parsing
import numpy as np
import scipy.ndimage
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn import mixture
from sklearn import metrics
clustersColours = ['#fcfa00', '#ff0000', '#820c2c', '#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff','#0c820e','#28ea04','#ea8404','#c8628f','#6283ff','#5b6756','#0c8248','k','#820cff','#932c11','#002c11','#829ca7']
def main():
	parser = argparse.ArgumentParser(prog='gmm.py',
		description='Performs Gaussian Mixture Model clustering using scikit-learn',
		formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
parser.add_argument('--clustersNumber',
help='Number of clusters',
type=int, default='5', choices=[3,4,5,6,7,8,9,10,11,12,13,14,15], required=False)
parser.add_argument('--framesNumber',
help='Number of frames used in STA analysis',
type=int, default='20', required=False)
parser.add_argument('--blockSize',
help='Size of each block in micrometres',
type=int, default='50', required=False)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
		print 'Source folder does not exist ' + sourceFolder
sys.exit()
#Output folder for the graphics
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
sys.exit()
#Clusters number for the kmeans algorithm
clustersNumber = args.clustersNumber
#Frames used in STA analysis
framesNumber = args.framesNumber
#Size of each block in micrometres
blockSize = args.blockSize
#dataCluster stores the data to be used for the clustering process
#the size is equal to the number of frames, aka, the time component
#plus 7 as we are incorporating the 2 dimensions of the ellipse,
	#2 dimensions of the ellipse in micrometres,
#x position, y position and angle
dataCluster = np.zeros((1,framesNumber+7))
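	# Column layout per unit row (in the order built below): the first
	# framesNumber columns hold the gaussian-smoothed temporal STA profile,
	# followed by the A and B radii in micrometres, the unscaled A and B radii,
	# the ellipse angle, and the x and y coordinates of the fit.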
units=[]
dato=np.zeros((1,1))
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
unitName = unitFile.rsplit('_', 1)[0]
dataUnit, coordinates = rfe.loadSTACurve(sourceFolder,unitFile,unitName)
xSize = dataUnit.shape[0]
ySize = dataUnit.shape[1]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
#should we use the not-gaussian-fitted data for clustering?
dataUnitGauss = scipy.ndimage.gaussian_filter(dataUnit[coordinates[0][0],[coordinates[1][0]],:],2)
#A radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitGauss,dato),1)
#B radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#A radius of the RF ellipse
dato[0] = fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#B radius of the RF ellipse
dato[0] = fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#angle of the RF ellipse
dato[0] = fitResult[0][1]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#X coordinate of the RF ellipse
dato[0] = fitResult[0][4]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#Y coordinate of the RF ellipse
dato[0] = fitResult[0][5]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
dataCluster = np.append(dataCluster,dataUnitCompleta, axis=0)
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
data = dataCluster[:,0:framesNumber+2]
gmix = mixture.GMM(n_components=clustersNumber, covariance_type='full')
gmix.fit(data)
labels = gmix.predict(data)
fit = metrics.silhouette_score(data, labels, metric='euclidean')
rfe.graficaCluster(labels, dataCluster[:,0:framesNumber-1], outputFolder+'gmm.png', clustersColours, fit)
# generate graphics of all ellipses
for clusterId in range(clustersNumber):
dataGrilla = np.zeros((1,framesNumber+7))
for unitId in range(dataCluster.shape[0]):
if labels[unitId] == clusterId:
datos=np.zeros((1,framesNumber+7))
datos[0]=dataCluster[unitId,:]
dataGrilla = np.append(dataGrilla,datos, axis=0)
## remove the first row of zeroes
dataGrilla = dataGrilla[1:,:]
rfe.graficaGrilla(dataGrilla, outputFolder+'Grilla_'+str(clusterId)+'.png', clustersColours[clusterId], framesNumber, xSize, ySize)
rfe.graficaCluster(labels, dataGrilla[:,0:framesNumber-1], outputFolder+'cluster_'+str(clusterId)+'.png', clustersColours[clusterId])
rfe.guardaClustersIDs(outputFolder, units, labels, outputFolder+'clusterings.csv')
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
zaxliu/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) #integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
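# probs[k] is the probability mass the truncated normal assigns to the k-th
# integer bin, i.e. cdf(upper bin edge) - cdf(lower bin edge).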
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
aminert/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
cwu2011/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST.
    The default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
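
Putting it together, a minimal ``conf.py`` setup might look like this (the
values shown are simply the defaults described above, repeated here for
illustration)::

    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'
    ipython_mplbackend = 'agg'
    ipython_execlines = ['import numpy as np',
                         'import matplotlib.pyplot as plt']
    ipython_holdcount = True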
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
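
Pseudo-decorators such as ``@suppress`` and ``@savefig`` (see the decorator
handling in ``process_input`` below) can be applied to individual inputs; an
illustrative sketch:

.. code-block:: rst

    .. ipython::

        @suppress
        In [4]: import numpy as np

        @savefig example_plot.png width=4in
        In [5]: plt.plot(np.random.randn(100));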
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
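
    For example (an illustrative sketch rather than captured output), the
    part::

        In [1]: 1 + 1
        Out[1]: 2

    would be parsed into ``[(INPUT, (None, '1 + 1', '')), (OUTPUT, '2')]``.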
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
            # default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
        # This will persist across different EmbeddedSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
        # based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
        # We are here if the @savefig pseudo decorator was used. Thus, it's
        # possible that we could be here even if ipython_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
                # Then ipython_mplbackend was set to None but there was a
                # call to the @savefig decorator (and ipython_execlines did
                # not set a backend).
                #raise Exception("No backend was set, but @savefig was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
prajjwal1/prajjwal1.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
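# A hypothetical example row (columns separated by tabs; every value below is
# made up purely for illustration):
#
#   title: My example talk
#   type: Talk
#   url_slug: example-talk
#   venue: Some Conference
#   date: 2014-03-01
#   location: Denver, CO
#   talk_url: http://example.com/talk
#   description: An example description.
#
# Such a row would produce `2014-03-01-example-talk.md` with the permalink
# `https://[yourdomain]/talks/2014-03-01-example-talk`.
#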
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
AnasGhrab/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
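    # One possible sketch of the TASKs above (illustrative only -- parameter
    # values such as min_df=3, max_df=0.95 and C=1000 are assumptions, not
    # the canonical solution); it defines the `y_predicted` variable used
    # below:
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],  # unigrams vs. unigrams+bigrams
    }
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated scores for each parameter set explored by the grid search
    print(grid_search.grid_scores_)
    y_predicted = grid_search.predict(docs_test)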
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
duncanwp/iris | lib/iris/tests/unit/plot/test_points.py | 11 | 3049 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.points` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.points(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.points(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.points(self.cube, coords=('str_coord', 'bar'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.points(self.cube, coords=('bar', 'str_coord'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.points,
self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = None
self.dataT = None
self.mpl_patch = self.patch('matplotlib.pyplot.scatter')
self.draw_func = iplt.points
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
aetilley/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
wmvanvliet/mne-python | tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py | 10 | 6588 | # -*- coding: utf-8 -*-
"""
.. _tut-brainstorm-elekta-phantom:
==========================================
Brainstorm Elekta phantom dataset tutorial
==========================================
Here we compute the evoked from raw for the Brainstorm Elekta phantom
tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and:
https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta
References
----------
.. footbibliography::
"""
# sphinx_gallery_thumbnail_number = 9
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
print(__doc__)
###############################################################################
# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
# are read to construct instances of :class:`mne.io.Raw`.
data_path = bst_phantom_elekta.data_path(verbose=True)
subject = 'sample'
raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)
###############################################################################
# Data channel array consisted of 204 MEG planar gradiometers,
# 102 axial magnetometers, and 3 stimulus channels. Let's get the events
# for the phantom, where each dipole (1-32) gets its own event:
events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG1933', 'MEG2421']
###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
# noise (five peaks around 300 Hz). Here we plot only out to 30 seconds
# to save memory:
raw.plot_psd(tmax=30., average=False)
###############################################################################
# Our phantom produces sinusoidal bursts at 20 Hz:
raw.plot(events=events)
###############################################################################
# Now we epoch our data, average it, and look at the first dipole response.
# The first peak appears around 3 ms. Because we low-passed at 40 Hz,
# we can also decimate our data to save memory.
tmin, tmax = -0.1, 0.1
bmax = -0.05 # Avoid capture filter ringing into baseline
event_id = list(range(1, 33))
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, bmax),
preload=False)
epochs['1'].average().plot(time_unit='s')
###############################################################################
# .. _plt_brainstorm_phantom_elekta_eeg_sphere_geometry:
#
# Let's use a :ref:`sphere head geometry model <eeg_sphere_model>`
# and let's see the coordinate alignment and the sphere location. The phantom
# is properly modeled by a single-shell sphere with origin (0., 0., 0.).
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08)
mne.viz.plot_alignment(epochs.info, subject=subject, show_axes=True,
bem=sphere, dig=True, surfaces='head')
###############################################################################
# Let's do some dipole fits. We first compute the noise covariance,
# then do the fits for each event_id taking the time instant that maximizes
# the global field power.
# here we can get away with using method='oas' for speed (faster than "shrunk")
# but in general "shrunk" is usually better
cov = mne.compute_covariance(epochs, tmax=bmax)
mne.viz.plot_evoked_white(epochs['1'].average(), cov)
data = []
t_peak = 0.036 # true for Elekta phantom
for ii in event_id:
# Avoid the first and last trials -- can contain dipole-switching artifacts
evoked = epochs[str(ii)][1:-1].average().crop(t_peak, t_peak)
data.append(evoked.data[:, 0])
evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
del epochs
dip, residual = fit_dipole(evoked, cov, sphere, n_jobs=1)
###############################################################################
# Do a quick visualization of how much variance we explained, putting the
# data and residuals on the same scale (here the "time points" are the
# 32 dipole peak values that we fit):
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
ax.texts = []
for line in ax.lines:
line.set_color('#98df81')
residual.plot(axes=axes)
###############################################################################
# Now we can compare to the actual locations, taking the difference in mm:
actual_pos, actual_ori = mne.dipole.get_phantom_dipoles()
actual_amp = 100. # nAm
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7))
diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
print('mean(position error) = %0.1f mm' % (np.mean(diffs),))
ax1.bar(event_id, diffs)
ax1.set_xlabel('Dipole index')
ax1.set_ylabel('Loc. error (mm)')
angles = np.rad2deg(np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1))))
print(u'mean(angle error) = %0.1f°' % (np.mean(angles),))
ax2.bar(event_id, angles)
ax2.set_xlabel('Dipole index')
ax2.set_ylabel(u'Angle error (°)')
amps = actual_amp - dip.amplitude / 1e-9
print('mean(abs amplitude error) = %0.1f nAm' % (np.mean(np.abs(amps)),))
ax3.bar(event_id, amps)
ax3.set_xlabel('Dipole index')
ax3.set_ylabel('Amplitude error (nAm)')
fig.tight_layout()
plt.show()
###############################################################################
# Let's plot the positions and the orientations of the actual and the estimated
# dipoles
actual_amp = np.ones(len(dip)) # misc amp to create Dipole instance
actual_gof = np.ones(len(dip)) # misc GOF to create Dipole instance
dip_true = \
mne.Dipole(dip.times, actual_pos, actual_amp, actual_ori, actual_gof)
fig = mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces='inner_skull',
coord_frame='head', meg='helmet', show_axes=True)
# Plot the position and the orientation of the actual dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip_true, mode='arrow',
subject=subject, color=(0., 0., 0.),
fig=fig)
# Plot the position and the orientation of the estimated dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip, mode='arrow', subject=subject,
color=(0.2, 1., 0.5), fig=fig)
mne.viz.set_3d_view(figure=fig, azimuth=70, elevation=80, distance=0.5)
| bsd-3-clause |
jhamman/xray | xarray/tests/test_variable.py | 1 | 54048 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import pytest
from distutils.version import LooseVersion
import numpy as np
import pytz
import pandas as pd
from xarray import Variable, IndexVariable, Coordinate, Dataset
from xarray.core import indexing
from xarray.core.variable import as_variable, as_compatible_data
from xarray.core.indexing import PandasIndexAdapter, LazilyIndexedArray
from xarray.core.pycompat import PY3, OrderedDict
from xarray.core.common import full_like, zeros_like, ones_like
from . import TestCase, source_ndarray, requires_dask
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.sizes, {'time': 10})
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def _assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
elif expected_dtype is not False:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT', 'ns')
x = self.cls(['x'], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls('x', listarray)
assert x.data == listarray
assert x[0].data == listarray.squeeze()
assert x.squeeze().data == listarray.squeeze()
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range('2011-09-01', periods=10)
for dates in [date_range, date_range.values,
date_range.to_pydatetime()]:
expected = self.cls('t', dates)
for times in [[expected[i] for i in range(10)],
[expected[i:(i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)]]:
actual = Variable.concat(times, 't')
self.assertEqual(expected.dtype, actual.dtype)
self.assertArrayEqual(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls('time', pd.date_range('2000-01-01', periods=5))
expected = np.datetime64('2000-01-01T00Z', 'ns')
self.assertEqual(x[0].values, expected)
def test_datetime64_conversion(self):
times = pd.date_range('2000-01-01', periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('datetime64[s]'), False),
(times.to_pydatetime(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype('timedelta64[s]'), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls('x', data)
self.assertEqual(actual.dtype, data.dtype)
def test_pandas_data(self):
v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1]))
self.assertVariableIdentical(v, v[[0, 1, 2]])
v = self.cls(['x'], pd.Index([0, 1, 2]))
self.assertEqual(v[0].values, v.values[0])
def test_pandas_period_index(self):
v = self.cls(['x'], pd.period_range(start='2000', periods=20, freq='B'))
self.assertEqual(v[0], pd.Period('2000', freq='B'))
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
# should we need `.to_base_variable()`?
# probably a break that `+v` changes type?
v = self.cls(['x'], x)
base_v = v.to_base_variable()
# unary ops
self.assertVariableIdentical(base_v, +v)
self.assertVariableIdentical(base_v, abs(v))
self.assertArrayEqual((-v).values, -x)
# binary ops with numbers
self.assertVariableIdentical(base_v, v + 0)
self.assertVariableIdentical(base_v, 0 + v)
self.assertVariableIdentical(base_v, v * 1)
self.assertArrayEqual((v > 2).values, x > 2)
self.assertArrayEqual((0 == v).values, 0 == x)
self.assertArrayEqual((v - 1).values, x - 1)
self.assertArrayEqual((1 - v).values, 1 - x)
# binary ops with numpy arrays
self.assertArrayEqual((v * x).values, x ** 2)
self.assertArrayEqual((x * v).values, x ** 2)
self.assertArrayEqual(v - y, v - 1)
self.assertArrayEqual(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(['x'], x, {'units': 'meters'})
self.assertVariableIdentical(base_v, +v2)
# binary ops with all variables
self.assertArrayEqual(v + v, 2 * v)
w = self.cls(['x'], y, {'foo': 'bar'})
self.assertVariableIdentical(v + w, self.cls(['x'], x + y).to_base_variable())
self.assertArrayEqual((v * w).values, x * y)
# something complicated
self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
self.assertEqual(float, (+v).dtype)
self.assertEqual(float, (+v).values.dtype)
self.assertEqual(float, (0 + v).dtype)
self.assertEqual(float, (0 + v).values.dtype)
# check types of returned data
self.assertIsInstance(+v, Variable)
self.assertNotIsInstance(+v, IndexVariable)
self.assertIsInstance(0 + v, Variable)
self.assertNotIsInstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(['x'], x)
actual = v.sum()
expected = Variable((), 10)
self.assertVariableIdentical(expected, actual)
self.assertIs(type(actual), Variable)
def test_array_interface(self):
x = np.arange(5)
v = self.cls(['x'], x)
self.assertArrayEqual(np.asarray(v), x)
# test patched in methods
self.assertArrayEqual(v.astype(float), x.astype(float))
# think this is a break, that argsort changes the type
self.assertVariableIdentical(v.argsort(), v.to_base_variable())
self.assertVariableIdentical(v.clip(2, 3),
self.cls('x', x.clip(2, 3)).to_base_variable())
# test ufuncs
self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)).to_base_variable())
self.assertIsInstance(np.sin(v), Variable)
self.assertNotIsInstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range('2000-01-01', periods=3),
np.array(['a', 'b', 'c'], dtype=object)]:
yield (self.cls('x', data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
self.assertArrayEqual(v.values, np.asarray(data))
self.assertArrayEqual(np.asarray(v), np.asarray(data))
self.assertEqual(v[0].values, np.asarray(data)[0])
self.assertEqual(np.asarray(v[0]), np.asarray(data)[0])
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
self.assertTrue(v.equals(v2))
self.assertTrue(v.identical(v2))
self.assertTrue(v.no_conflicts(v2))
self.assertTrue(v[0].equals(v2[0]))
self.assertTrue(v[0].identical(v2[0]))
self.assertTrue(v[0].no_conflicts(v2[0]))
self.assertTrue(v[:2].equals(v2[:2]))
self.assertTrue(v[:2].identical(v2[:2]))
self.assertTrue(v[:2].no_conflicts(v2[:2]))
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable('x', 3 * [False])
for v, _ in self.example_1d_objects():
actual = 'z' == v
self.assertVariableIdentical(expected, actual)
actual = ~('z' != v)
self.assertVariableIdentical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls('x', range(3), {'foo': 1}, {'bar': 2})
for actual in [expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({'x': 3}),
expected.copy(deep=True),
expected.copy(deep=False)]:
self.assertVariableIdentical(expected.to_base_variable(),
actual.to_base_variable())
self.assertEqual(expected.encoding, actual.encoding)
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(['a'], x)
w = self.cls(['a'], y)
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat([v, w], 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
with self.assertRaisesRegexp(ValueError, 'inconsistent dimensions'):
Variable.concat([v, Variable(['c'], y)], 'b')
# test indexers
actual = Variable.concat(
[v, w],
positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)],
dim='a')
expected = Variable('a', np.array([x, y]).ravel(order='F'))
self.assertVariableIdentical(expected, actual)
# test concatenating along a dimension
v = Variable(['time', 'x'], np.random.random((10, 8)))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time'))
# test dimension order
self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
with self.assertRaisesRegexp(ValueError, 'all input arrays must have'):
Variable.concat([v[:, 0], v[:, 1:]], 'x')
def test_concat_attrs(self):
# different or conflicting attributes should be removed
v = self.cls('a', np.arange(5), {'foo': 'bar'})
w = self.cls('a', np.ones(5))
expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)])).to_base_variable()
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 2
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 'bar'
expected.attrs['foo'] = 'bar'
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ['S', 'U']:
x = self.cls('animal', np.array(['horse'], dtype=kind))
y = self.cls('animal', np.array(['aardvark'], dtype=kind))
actual = Variable.concat([x, y], 'animal')
expected = Variable(
'animal', np.array(['horse', 'aardvark'], dtype=kind))
self.assertVariableEqual(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls('x', ['0', '1', '2'])
b = self.cls('x', ['3', '4'])
actual = Variable.concat([a, b], dim='x')
expected = Variable('x', np.arange(5).astype(str).astype(object))
self.assertVariableIdentical(expected, actual)
self.assertEqual(expected.dtype, object)
self.assertEqual(type(expected.values[0]), str)
def test_copy(self):
v = self.cls('x', 0.5 * np.arange(10), {'foo': 'bar'})
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIs(type(v), type(w))
self.assertVariableIdentical(v, w)
self.assertEqual(v.dtype, w.dtype)
if self.cls is Variable:
if deep:
self.assertIsNot(source_ndarray(v.values),
source_ndarray(w.values))
else:
self.assertIs(source_ndarray(v.values),
source_ndarray(w.values))
self.assertVariableIdentical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]],
names=('one', 'two', 'three'))
v = self.cls('x', midx)
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIsInstance(w._data, PandasIndexAdapter)
self.assertIsInstance(w.to_index(), pd.MultiIndex)
self.assertArrayEqual(v._data.array, w._data.array)
def test_real_and_imag(self):
v = self.cls('x', np.arange(3) - 1j * np.arange(3), {'foo': 'bar'})
expected_re = self.cls('x', np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.real, expected_re)
expected_im = self.cls('x', -np.arange(3), {'foo': 'bar'})
self.assertVariableIdentical(v.imag, expected_im)
expected_abs = self.cls('x', np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
self.assertVariableAllClose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls('x', [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
self.assertVariableAllClose(v.mean(), expected)
    def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype='int64'))
v = self.cls('x', data)
print(v) # should not error
assert v.dtype == 'int64'
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(start='2000-01-01',
tz=pytz.timezone('America/New_York'),
periods=10, freq='1h')
v = self.cls('x', data)
print(v) # should not error
if 'America/New_York' in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == 'object'
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list('abc'), [0, 1]])
v = self.cls('x', idx)
self.assertVariableIdentical(Variable((), ('a', 0)), v[0])
self.assertVariableIdentical(v, v[:])
def test_load(self):
array = self.cls('x', np.arange(5))
orig_data = array._data
copied = array.copy(deep=True)
array.load()
assert type(array._data) is type(orig_data)
assert type(copied._data) is type(orig_data)
self.assertVariableIdentical(array, copied)
class TestVariable(TestCase, VariableSubclassTestCases):
cls = staticmethod(Variable)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(['time', 'x'], self.d)
self.assertArrayEqual(v.data, self.d)
self.assertArrayEqual(v.values, self.d)
self.assertIs(source_ndarray(v.values), self.d)
with self.assertRaises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
self.assertIs(source_ndarray(v.values), d2)
d3 = np.random.random((10, 3))
v.data = d3
self.assertIs(source_ndarray(v.data), d3)
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
self.assertEqual(v.item(), 0)
self.assertIs(type(v.item()), float)
v = IndexVariable('x', np.arange(5))
self.assertEqual(2, v.searchsorted(2))
def test_datetime64_conversion_scalar(self):
expected = np.datetime64('2000-01-01T00:00:00Z', 'ns')
for values in [
np.datetime64('2000-01-01T00Z'),
pd.Timestamp('2000-01-01T00'),
datetime(2000, 1, 1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
for values in [
np.timedelta64(1, 'D'),
pd.Timedelta('1 day'),
timedelta(days=1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
def test_0d_str(self):
v = Variable([], u'foo')
self.assertEqual(v.dtype, np.dtype('U3'))
self.assertEqual(v.values, 'foo')
v = Variable([], np.string_('foo'))
self.assertEqual(v.dtype, np.dtype('S3'))
self.assertEqual(v.values, bytes('foo', 'ascii') if PY3 else 'foo')
def test_0d_datetime(self):
v = Variable([], pd.Timestamp('2000-01-01'))
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns'))
def test_0d_timedelta(self):
for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]:
v = Variable([], td)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, np.timedelta64(10 ** 9, 'ns'))
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
v2 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
self.assertTrue(v1.equals(v2))
self.assertTrue(v1.identical(v2))
v3 = Variable(('dim1', 'dim3'), data=d)
self.assertFalse(v1.equals(v3))
v4 = Variable(('dim1', 'dim2'), data=d)
self.assertTrue(v1.equals(v4))
self.assertFalse(v1.identical(v4))
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
self.assertFalse(v1.equals(v5))
self.assertFalse(v1.equals(None))
self.assertFalse(v1.equals(d))
self.assertFalse(v1.identical(None))
self.assertFalse(v1.identical(d))
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(('x'), [np.nan, np.nan])
self.assertTrue(v1.broadcast_equals(v2))
self.assertFalse(v1.equals(v2))
self.assertFalse(v1.identical(v2))
v3 = Variable(('x'), [np.nan])
self.assertTrue(v1.broadcast_equals(v3))
self.assertFalse(v1.equals(v3))
self.assertFalse(v1.identical(v3))
self.assertFalse(v1.broadcast_equals(None))
v4 = Variable(('x'), [np.nan] * 3)
self.assertFalse(v2.broadcast_equals(v4))
def test_no_conflicts(self):
v1 = Variable(('x'), [1, 2, np.nan, np.nan])
v2 = Variable(('x'), [np.nan, 2, 3, np.nan])
self.assertTrue(v1.no_conflicts(v2))
self.assertFalse(v1.equals(v2))
self.assertFalse(v1.broadcast_equals(v2))
self.assertFalse(v1.identical(v2))
self.assertFalse(v1.no_conflicts(None))
v3 = Variable(('y'), [np.nan, 2, 3, np.nan])
self.assertFalse(v3.no_conflicts(v1))
d = np.array([1, 2, np.nan, np.nan])
self.assertFalse(v1.no_conflicts(d))
self.assertFalse(v2.no_conflicts(d))
v4 = Variable(('w', 'x'), [d])
self.assertTrue(v1.no_conflicts(v4))
def test_as_variable(self):
data = np.arange(10)
expected = Variable('x', data)
expected_extra = Variable('x', data, attrs={'myattr': 'val'},
encoding={'scale_factor': 1})
self.assertVariableIdentical(expected, as_variable(expected))
ds = Dataset({'x': expected})
var = as_variable(ds['x']).to_base_variable()
self.assertVariableIdentical(expected, var)
self.assertNotIsInstance(ds['x'], Variable)
self.assertIsInstance(as_variable(ds['x']), Variable)
FakeVariable = namedtuple('FakeVariable', 'values dims')
fake_xarray = FakeVariable(expected.values, expected.dims)
self.assertVariableIdentical(expected, as_variable(fake_xarray))
FakeVariable = namedtuple('FakeVariable', 'data dims')
fake_xarray = FakeVariable(expected.data, expected.dims)
self.assertVariableIdentical(expected, as_variable(fake_xarray))
FakeVariable = namedtuple('FakeVariable',
'data values dims attrs encoding')
fake_xarray = FakeVariable(expected_extra.data, expected_extra.values,
expected_extra.dims, expected_extra.attrs,
expected_extra.encoding)
self.assertVariableIdentical(expected_extra, as_variable(fake_xarray))
xarray_tuple = (expected_extra.dims, expected_extra.values,
expected_extra.attrs, expected_extra.encoding)
self.assertVariableIdentical(expected_extra, as_variable(xarray_tuple))
with self.assertRaisesRegexp(TypeError, 'tuples to convert'):
as_variable(tuple(data))
with self.assertRaisesRegexp(
TypeError, 'without an explicit list of dimensions'):
as_variable(data)
actual = as_variable(data, name='x')
self.assertVariableIdentical(expected.to_index_variable(), actual)
actual = as_variable(0)
expected = Variable([], 0)
self.assertVariableIdentical(expected, actual)
data = np.arange(9).reshape((3, 3))
expected = Variable(('x', 'y'), data)
with self.assertRaisesRegexp(
ValueError, 'without explicit dimension names'):
as_variable(data, name='x')
with self.assertRaisesRegexp(
ValueError, 'has more than 1-dimension'):
as_variable(expected, name='x')
def test_repr(self):
v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'})
expected = dedent("""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
""").strip()
self.assertEqual(expected, repr(v))
def test_repr_lazy_data(self):
v = Variable('x', LazilyIndexedArray(np.arange(2e5)))
self.assertIn('200000 values with dtype', repr(v))
self.assertIsInstance(v._data, LazilyIndexedArray)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(['x', 'y'], data)
# test slicing
self.assertVariableIdentical(v, v[:])
self.assertVariableIdentical(v, v[...])
self.assertVariableIdentical(Variable(['y'], data[0]), v[0])
self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0])
self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]),
v[:3, :2])
# test array indexing
x = Variable(['x'], np.arange(10))
y = Variable(['y'], np.arange(11))
self.assertVariableIdentical(v, v[x.values])
self.assertVariableIdentical(v, v[x])
self.assertVariableIdentical(v[:3], v[x < 3])
self.assertVariableIdentical(v[:, 3:], v[:, y >= 3])
self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3])
self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]])
self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
self.assertVariableIdentical(Variable(['y'], data[n]), item)
with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'):
iter(Variable([], 0))
# test setting
v.values[:] = 0
self.assertTrue(np.all(v.values == 0))
# test orthogonal setting
v[range(10), range(11)] = 1
self.assertArrayEqual(v.values, np.ones((10, 11)))
def test_isel(self):
v = Variable(['time', 'x'], self.d)
self.assertVariableIdentical(v.isel(time=slice(None)), v)
self.assertVariableIdentical(v.isel(time=0), v[0])
self.assertVariableIdentical(v.isel(time=slice(0, 3)), v[:3])
self.assertVariableIdentical(v.isel(x=0), v[:, 0])
with self.assertRaisesRegexp(ValueError, 'do not exist'):
v.isel(not_a_dim=0)
def test_index_0d_numpy_string(self):
# regression test to verify our work around for indexing 0d strings
v = Variable([], np.string_('asdf'))
self.assertVariableIdentical(v[()], v)
v = Variable([], np.unicode_(u'asdf'))
self.assertVariableIdentical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(('x'), [u'tmax'])[0][()]
expected = Variable((), u'tmax')
self.assertVariableIdentical(actual, expected)
def test_shift(self):
v = Variable('x', [1, 2, 3, 4, 5])
self.assertVariableIdentical(v, v.shift(x=0))
self.assertIsNot(v, v.shift(x=0))
expected = Variable('x', [np.nan, 1, 2, 3, 4])
self.assertVariableIdentical(expected, v.shift(x=1))
expected = Variable('x', [np.nan, np.nan, 1, 2, 3])
self.assertVariableIdentical(expected, v.shift(x=2))
expected = Variable('x', [2, 3, 4, 5, np.nan])
self.assertVariableIdentical(expected, v.shift(x=-1))
expected = Variable('x', [np.nan] * 5)
self.assertVariableIdentical(expected, v.shift(x=5))
self.assertVariableIdentical(expected, v.shift(x=6))
with self.assertRaisesRegexp(ValueError, 'dimension'):
v.shift(z=0)
v = Variable('x', [1, 2, 3, 4, 5], {'foo': 'bar'})
self.assertVariableIdentical(v, v.shift(x=0))
expected = Variable('x', [np.nan, 1, 2, 3, 4], {'foo': 'bar'})
self.assertVariableIdentical(expected, v.shift(x=1))
def test_shift2d(self):
v = Variable(('x', 'y'), [[1, 2], [3, 4]])
expected = Variable(('x', 'y'), [[np.nan, np.nan], [np.nan, 1]])
self.assertVariableIdentical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable('x', [1, 2, 3, 4, 5])
self.assertVariableIdentical(v, v.roll(x=0))
self.assertIsNot(v, v.roll(x=0))
expected = Variable('x', [5, 1, 2, 3, 4])
self.assertVariableIdentical(expected, v.roll(x=1))
self.assertVariableIdentical(expected, v.roll(x=-4))
self.assertVariableIdentical(expected, v.roll(x=6))
expected = Variable('x', [4, 5, 1, 2, 3])
self.assertVariableIdentical(expected, v.roll(x=2))
self.assertVariableIdentical(expected, v.roll(x=-3))
with self.assertRaisesRegexp(ValueError, 'dimension'):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(('x', 'y'), np.random.randn(5, 6))
for axis, dim in [(0, 'x'), (1, 'y')]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
self.assertArrayEqual(expected, actual)
def test_transpose(self):
v = Variable(['time', 'x'], self.d)
v2 = Variable(['x', 'time'], self.d.T)
self.assertVariableIdentical(v, v2.transpose())
self.assertVariableIdentical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(['a', 'b', 'c', 'd'], x)
w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
self.assertEqual(w2.shape, (5, 3, 4, 2))
self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
def test_transpose_0d(self):
for value in [
3.5,
('a', 1),
np.datetime64('2000-01-01'),
np.timedelta64(1, 'h'),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert actual.identical(variable)
def test_squeeze(self):
v = Variable(['x', 'y'], [[1]])
self.assertVariableIdentical(Variable([], 1), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x'))
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x']))
self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y'))
self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y']))
v = Variable(['x', 'y'], [[1, 2]])
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x'))
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
v.squeeze('y')
def test_get_axis_num(self):
v = Variable(['x', 'y', 'z'], np.random.randn(2, 3, 4))
self.assertEqual(v.get_axis_num('x'), 0)
self.assertEqual(v.get_axis_num(['x']), (0,))
self.assertEqual(v.get_axis_num(['x', 'y']), (0, 1))
self.assertEqual(v.get_axis_num(['z', 'y', 'x']), (2, 1, 0))
with self.assertRaisesRegexp(ValueError, 'not found in array dim'):
v.get_axis_num('foobar')
def test_set_dims(self):
v = Variable(['x'], [0, 1])
actual = v.set_dims(['x', 'y'])
expected = Variable(['x', 'y'], [[0], [1]])
self.assertVariableIdentical(actual, expected)
actual = v.set_dims(['y', 'x'])
self.assertVariableIdentical(actual, expected.T)
actual = v.set_dims(OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(['x', 'y'], [[0, 0], [1, 1]])
self.assertVariableIdentical(actual, expected)
v = Variable(['foo'], [0, 1])
actual = v.set_dims('foo')
expected = v
self.assertVariableIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'must be a superset'):
v.set_dims(['z'])
def test_set_dims_object_dtype(self):
v = Variable([], ('a', 1))
actual = v.set_dims(('x',), (3,))
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ('a', 1)
expected = Variable(['x'], exp_values)
assert actual.identical(expected)
def test_stack(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
actual = v.stack(z=('x', 'y'))
expected = Variable('z', [0, 1, 2, 3], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.stack(z=('x',))
expected = Variable(('y', 'z'), v.data.T, v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.stack(z=(),)
self.assertVariableIdentical(actual, v)
actual = v.stack(X=('x',), Y=('y',)).transpose('X', 'Y')
expected = Variable(('X', 'Y'), v.data, v.attrs)
self.assertVariableIdentical(actual, expected)
def test_stack_errors(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
v.stack(z=('x1',))
with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
v.stack(x=('x',))
def test_unstack(self):
v = Variable('z', [0, 1, 2, 3], {'foo': 'bar'})
actual = v.unstack(z=OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(('x', 'y'), [[0, 1], [2, 3]], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(z=OrderedDict([('x', 4), ('y', 1)]))
expected = Variable(('x', 'y'), [[0], [1], [2], [3]], v.attrs)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(z=OrderedDict([('x', 4)]))
expected = Variable('x', [0, 1, 2, 3], v.attrs)
self.assertVariableIdentical(actual, expected)
def test_unstack_errors(self):
v = Variable('z', [0, 1, 2, 3])
with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
v.unstack(foo={'x': 4})
with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
v.stack(z=('z',))
with self.assertRaisesRegexp(ValueError, 'the product of the new dim'):
v.unstack(z={'x': 5})
def test_unstack_2d(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]])
actual = v.unstack(y={'z': 2})
expected = Variable(['x', 'z'], v.data)
self.assertVariableIdentical(actual, expected)
actual = v.unstack(x={'z': 2})
expected = Variable(['y', 'z'], v.data.T)
self.assertVariableIdentical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(['x', 'y'], [[0, 1], [2, 3]])
actual = (v.stack(z=('x', 'y'))
.unstack(z=OrderedDict([('x', 2), ('y', 2)])))
self.assertVariableIdentical(actual, v)
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(['a', 'b'], x)
# 1d to 2d broadcasting
self.assertVariableIdentical(
v * v,
Variable(['a', 'b'], np.einsum('ab,ab->ab', x, x)))
self.assertVariableIdentical(
v * v[0],
Variable(['a', 'b'], np.einsum('ab,b->ab', x, x[0])))
self.assertVariableIdentical(
v[0] * v,
Variable(['b', 'a'], np.einsum('b,ab->ba', x[0], x)))
self.assertVariableIdentical(
v[0] * v[:, 0],
Variable(['b', 'a'], np.einsum('b,a->ba', x[0], x[:, 0])))
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(['b', 'c', 'd'], y)
self.assertVariableIdentical(
v * w, Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,bcd->abcd', x, y)))
self.assertVariableIdentical(
w * v, Variable(['b', 'c', 'd', 'a'],
np.einsum('bcd,ab->bcda', y, x)))
self.assertVariableIdentical(
v * w[0], Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,cd->abcd', x, y[0])))
def test_broadcasting_failures(self):
a = Variable(['x'], np.arange(10))
b = Variable(['x'], np.arange(5))
c = Variable(['x', 'x'], np.arange(100).reshape(10, 10))
with self.assertRaisesRegexp(ValueError, 'mismatched lengths'):
a + b
with self.assertRaisesRegexp(ValueError, 'duplicate dimensions'):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(['x'], x)
v2 = v
v2 += 1
self.assertIs(v, v2)
# since we provided an ndarray for data, it is also modified in-place
self.assertIs(source_ndarray(v.values), x)
self.assertArrayEqual(v.values, np.arange(5) + 1)
with self.assertRaisesRegexp(ValueError, 'dimensions cannot change'):
v += Variable('y', np.arange(5))
def test_reduce(self):
v = Variable(['x', 'y'], self.d, {'ignored': 'attributes'})
self.assertVariableIdentical(v.reduce(np.std, 'x'),
Variable(['y'], self.d.std(axis=0)))
self.assertVariableIdentical(v.reduce(np.std, axis=0),
v.reduce(np.std, dim='x'))
self.assertVariableIdentical(v.reduce(np.std, ['y', 'x']),
Variable([], self.d.std(axis=(0, 1))))
self.assertVariableIdentical(v.reduce(np.std),
Variable([], self.d.std()))
self.assertVariableIdentical(
v.reduce(np.mean, 'x').reduce(np.std, 'y'),
Variable([], self.d.mean(axis=0).std()))
self.assertVariableAllClose(v.mean('x'), v.reduce(np.mean, 'x'))
with self.assertRaisesRegexp(ValueError, 'cannot supply both'):
v.mean(dim='x', axis=0)
@pytest.mark.skipif(LooseVersion(np.__version__) < LooseVersion('1.10.0'),
reason='requires numpy version 1.10.0 or later')
def test_quantile(self):
v = Variable(['x', 'y'], self.d)
for q in [0.25, [0.50], [0.25, 0.75]]:
for axis, dim in zip([None, 0, [0], [0, 1]],
[None, 'x', ['x'], ['x', 'y']]):
actual = v.quantile(q, dim=dim)
expected = np.nanpercentile(self.d, np.array(q) * 100,
axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
def test_quantile_dask_raises(self):
# regression for GH1524
v = Variable(['x', 'y'], self.d).chunk(2)
with self.assertRaisesRegexp(TypeError, 'arrays stored as dask'):
v.quantile(0.5, dim='x')
def test_big_endian_reduce(self):
# regression test for GH489
data = np.ones(5, dtype='>f4')
v = Variable(['x'], data)
expected = Variable([], 5)
self.assertVariableIdentical(expected, v.sum())
def test_reduce_funcs(self):
v = Variable('x', np.array([1, np.nan, 2, 3]))
self.assertVariableIdentical(v.mean(), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=False), Variable([], np.nan))
self.assertVariableIdentical(np.mean(v), Variable([], 2))
self.assertVariableIdentical(v.prod(), Variable([], 6))
self.assertVariableIdentical(v.cumsum(axis=0),
Variable('x', np.array([1, 1, 3, 6])))
self.assertVariableIdentical(v.cumprod(axis=0),
Variable('x', np.array([1, 1, 2, 6])))
self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3))
if LooseVersion(np.__version__) < '1.9':
with self.assertRaises(NotImplementedError):
v.median()
else:
self.assertVariableIdentical(v.median(), Variable([], 2))
v = Variable('x', [True, False, False])
self.assertVariableIdentical(v.any(), Variable([], True))
self.assertVariableIdentical(v.all(dim='x'), Variable([], False))
v = Variable('t', pd.date_range('2000-01-01', periods=3))
with self.assertRaises(NotImplementedError):
v.max(skipna=True)
self.assertVariableIdentical(
v.max(), Variable([], pd.Timestamp('2000-01-03')))
def test_reduce_keep_attrs(self):
_attrs = {'units': 'test', 'long_name': 'testing'}
v = Variable(['x', 'y'], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
self.assertEqual(len(vm.attrs), 0)
self.assertEqual(vm.attrs, OrderedDict())
# Test kept attrs
vm = v.mean(keep_attrs=True)
self.assertEqual(len(vm.attrs), len(_attrs))
self.assertEqual(vm.attrs, _attrs)
def test_count(self):
expected = Variable([], 3)
actual = Variable(['x'], [1, 2, 3, np.nan]).count()
self.assertVariableIdentical(expected, actual)
v = Variable(['x'], np.array(['1', '2', '3', np.nan], dtype=object))
actual = v.count()
self.assertVariableIdentical(expected, actual)
actual = Variable(['x'], [True, False, True]).count()
self.assertVariableIdentical(expected, actual)
self.assertEqual(actual.dtype, int)
expected = Variable(['x'], [2, 3])
actual = Variable(['x', 'y'], [[1, 0, np.nan], [1, 1, 1]]).count('y')
self.assertVariableIdentical(expected, actual)
class TestIndexVariable(TestCase, VariableSubclassTestCases):
cls = staticmethod(IndexVariable)
def test_init(self):
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
IndexVariable((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = IndexVariable(['time'], data, {'foo': 'bar'})
self.assertTrue(pd.Index(data, name='time').identical(v.to_index()))
def test_multiindex_default_level_names(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
v = IndexVariable(['x'], midx, {'foo': 'bar'})
self.assertEqual(v.to_index().names, ('x_level_0', 'x_level_1'))
def test_data(self):
x = IndexVariable('x', np.arange(3.0))
self.assertIsInstance(x._data, PandasIndexAdapter)
self.assertIsInstance(x.data, np.ndarray)
self.assertEqual(float, x.dtype)
self.assertArrayEqual(np.arange(3), x)
self.assertEqual(float, x.values.dtype)
with self.assertRaisesRegexp(TypeError, 'cannot be modified'):
x[:] = 0
def test_name(self):
coord = IndexVariable('x', [10.0])
self.assertEqual(coord.name, 'x')
with self.assertRaises(AttributeError):
coord.name = 'y'
def test_level_names(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
names=['level_1', 'level_2'])
x = IndexVariable('x', midx)
self.assertEqual(x.level_names, midx.names)
self.assertIsNone(IndexVariable('y', [10.0]).level_names)
def test_get_level_variable(self):
midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
names=['level_1', 'level_2'])
x = IndexVariable('x', midx)
level_1 = IndexVariable('x', midx.get_level_values('level_1'))
self.assertVariableIdentical(x.get_level_variable('level_1'), level_1)
with self.assertRaisesRegexp(ValueError, 'has no MultiIndex'):
IndexVariable('y', [10.0]).get_level_variable('level')
def test_concat_periods(self):
periods = pd.period_range('2000-01-01', periods=10)
coords = [IndexVariable('t', periods[:5]), IndexVariable('t', periods[5:])]
expected = IndexVariable('t', periods)
actual = IndexVariable.concat(coords, dim='t')
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
positions = [list(range(5)), list(range(5, 10))]
actual = IndexVariable.concat(coords, dim='t', positions=positions)
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
def test_concat_multiindex(self):
idx = pd.MultiIndex.from_product([[0, 1, 2], ['a', 'b']])
coords = [IndexVariable('x', idx[:2]), IndexVariable('x', idx[2:])]
expected = IndexVariable('x', idx)
actual = IndexVariable.concat(coords, dim='x')
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.MultiIndex)
def test_coordinate_alias(self):
with self.assertWarns('deprecated'):
x = Coordinate('x', [1, 2, 3])
self.assertIsInstance(x, IndexVariable)
class TestAsCompatibleData(TestCase):
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, indexing.LazilyIndexedArray)
for t in types:
for data in [np.arange(3),
pd.date_range('2000-01-01', periods=3),
pd.date_range('2000-01-01', periods=3).values]:
x = t(data)
self.assertIs(source_ndarray(x),
source_ndarray(as_compatible_data(x)))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = as_compatible_data(input_array)
self.assertArrayEqual(np.asarray(input_array), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.asarray(input_array).dtype, actual.dtype)
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(int), actual.dtype)
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(float), actual.dtype)
def test_datetime(self):
expected = np.datetime64('2000-01-01T00Z')
actual = as_compatible_data(expected)
self.assertEqual(expected, actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z')])
actual = as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z', 'ns')])
actual = as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
self.assertIs(expected, source_ndarray(np.asarray(actual)))
expected = np.datetime64('2000-01-01T00Z', 'ns')
actual = as_compatible_data(datetime(2000, 1, 1))
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
def test_full_like(self):
# For more thorough tests, see test_variable.py
        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
attrs={'foo': 'bar'})
expect = orig.copy(deep=True)
expect.values = [[2.0, 2.0], [2.0, 2.0]]
self.assertVariableIdentical(expect, full_like(orig, 2))
# override dtype
expect.values = [[True, True], [True, True]]
        self.assertEqual(expect.dtype, bool)
self.assertVariableIdentical(expect, full_like(orig, True, dtype=bool))
@requires_dask
def test_full_like_dask(self):
orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
attrs={'foo': 'bar'}).chunk(((1, 1), (2,)))
def check(actual, expect_dtype, expect_values):
self.assertEqual(actual.dtype, expect_dtype)
self.assertEqual(actual.shape, orig.shape)
self.assertEqual(actual.dims, orig.dims)
self.assertEqual(actual.attrs, orig.attrs)
self.assertEqual(actual.chunks, orig.chunks)
self.assertArrayEqual(actual.values, expect_values)
check(full_like(orig, 2),
orig.dtype, np.full_like(orig.values, 2))
# override dtype
check(full_like(orig, True, dtype=bool),
bool, np.full_like(orig.values, True, dtype=bool))
# Check that there's no array stored inside dask
# (e.g. we didn't create a numpy array and then we chunked it!)
dsk = full_like(orig, 1).data.dask
for v in dsk.values():
if isinstance(v, tuple):
for vi in v:
assert not isinstance(vi, np.ndarray)
else:
assert not isinstance(v, np.ndarray)
def test_zeros_like(self):
        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
attrs={'foo': 'bar'})
self.assertVariableIdentical(zeros_like(orig),
full_like(orig, 0))
self.assertVariableIdentical(zeros_like(orig, dtype=int),
full_like(orig, 0, dtype=int))
def test_ones_like(self):
        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
attrs={'foo': 'bar'})
self.assertVariableIdentical(ones_like(orig),
full_like(orig, 1))
self.assertVariableIdentical(ones_like(orig, dtype=int),
full_like(orig, 1, dtype=int))
| apache-2.0 |
flaviobarros/spyre | examples/stocks_example.py | 3 | 2387 | # tested with python2.7 and 3.4
from spyre import server
import pandas as pd
import json
try:
import urllib2
except ImportError:
import urllib.request as urllib2
class StockExample(server.App):
def __init__(self):
# implements a simple caching mechanism to avoid multiple calls to the yahoo finance api
self.data_cache = None
self.params_cache = None
title = "Historical Stock Prices"
inputs = [{ "type":'dropdown',
"label": 'Company',
"options" : [ {"label": "Google", "value":"GOOG"},
{"label": "Yahoo", "value":"YHOO"},
{"label": "Apple", "value":"AAPL"}],
"value":'GOOG',
"key": 'ticker',
"action_id": "update_data"}]
controls = [{ "type" : "hidden",
"id" : "update_data"}]
tabs = ["Plot", "Table"]
outputs = [{ "type" : "plot",
"id" : "plot",
"control_id" : "update_data",
"tab" : "Plot"},
{ "type" : "table",
"id" : "table_id",
"control_id" : "update_data",
"tab" : "Table",
"on_page_load" : True }]
def getData(self, params):
params.pop("output_id",None) # caching layer
if self.params_cache!=params: # caching layer
ticker = params['ticker']
# make call to yahoo finance api to get historical stock data
api_url = 'https://chartapi.finance.yahoo.com/instrument/1.0/{}/chartdata;type=quote;range=3m/json'.format(ticker)
result = urllib2.urlopen(api_url).read()
data = json.loads(result.decode('utf-8').replace('finance_charts_json_callback( ','')[:-1]) # strip away the javascript and load json
self.company_name = data['meta']['Company-Name']
df = pd.DataFrame.from_records(data['series'])
df['Date'] = pd.to_datetime(df['Date'],format='%Y%m%d')
self.data_cache = df # caching layer
self.params_cache = params # caching layer
return self.data_cache
def getPlot(self, params):
### implements a simple caching mechanism to avoid multiple calls to the yahoo finance api ###
params.pop("output_id",None)
while self.params_cache!=params:
pass
###############################################################################################
df = self.getData(params)
plt_obj = df.set_index('Date').drop(['volume'],axis=1).plot()
plt_obj.set_ylabel("Price")
plt_obj.set_title(self.company_name)
fig = plt_obj.get_figure()
return fig
if __name__ == '__main__':
app = StockExample()
app.launch(port=9093)
| mit |
SciTools/cube_browser | lib/cube_browser/explorer.py | 1 | 15222 | from collections import OrderedDict
import glob
import os
try:
# Python 3
from urllib.parse import urlparse, parse_qs
except ImportError:
# Python 2
from urlparse import urlparse, parse_qs
import IPython.display
import cartopy.crs as ccrs
import ipywidgets
import iris
import iris.plot as iplt
import matplotlib.pyplot as plt
import traitlets
import cube_browser
# Clear output, such as autosave disable notification.
IPython.display.clear_output()
class FilePicker(object):
"""
File picker widgets.
"""
def __init__(self, initial_value='', default=''):
if initial_value == '':
try:
initial_value = iris.sample_data_path('')
except ValueError:
initial_value = ''
# Define the file system path for input files.
self._path = ipywidgets.Text(
description='Path:',
value=initial_value,
width="100%")
# Observe the path.
self._path.observe(self._handle_path, names='value')
# Use default path value to initialise file options.
options = []
if os.path.exists(self._path.value):
options = glob.glob('{}/*'.format(self._path.value))
options.sort()
default_list = []
for default_value in default.split(','):
if default_value in options:
default_list.append(default_value)
default_tuple = tuple(default_list)
# Defines the files selected to be loaded.
self._files = ipywidgets.SelectMultiple(
description='Files:',
options=OrderedDict([(os.path.basename(f), f)
for f in options]),
value=default_tuple,
width="100%"
)
self.deleter = ipywidgets.Button(description='delete tab',
height='32px', width='75px')
hbox = ipywidgets.HBox(children=[self._files, self.deleter])
self._box = ipywidgets.Box(children=[self._path, hbox], width="100%")
@property
def files(self):
"""The files from the FilePicker."""
return self._files.value
def _handle_path(self, sender):
"""Path box action."""
if os.path.exists(self._path.value):
options = glob.glob('{}/*'.format(self._path.value))
options.sort()
self._files.value = ()
self._files.options = OrderedDict([(os.path.basename(f), f)
for f in options])
else:
self._files.options = OrderedDict()
self._files.width = "100%"
@property
def box(self):
"""The IPywidgets box to display."""
return self._box
class PlotControl(object):
"""Control widgets for a plot."""
def __init__(self):
self.mpl_kwargs = {}
# Defines the cube which is to be plotted.
self.cube_picker = ipywidgets.Dropdown(description='Cubes:',
options=('None', None),
value=None,
width='50%')
# Define the type of cube browser plot required
self.plot_type = ipywidgets.Dropdown(
description='Plot type:',
options={'pcolormesh': cube_browser.Pcolormesh,
'contour': cube_browser.Contour,
'contourf': cube_browser.Contourf},
value=cube_browser.Pcolormesh)
self.x_coord = ipywidgets.Dropdown(
description='X Coord',
options=('None', None))
self.y_coord = ipywidgets.Dropdown(
description='Y Coord',
options=('None', None))
self.cmap = ipywidgets.Text(
description='colour map')
# Handle events:
self.cube_picker.observe(self._handle_cube_selection,
names='value')
self.cmap.observe(self._handle_cmap, names='value')
self.plot_type.observe(self._handle_plot_type, names='value')
self._box = ipywidgets.Box(children=[self.cube_picker,
self.plot_type,
self.x_coord,
self.y_coord,
self.cmap])
def _handle_cube_selection(self, sender):
"""Cube selector action."""
if self.cube_picker.value is not None:
cube = self.cube_picker.cubes[self.cube_picker.value]
options = [('None', None)]
options += [(coord.name(), coord.name()) for coord in
cube.coords(dim_coords=True)]
ndims = cube.ndim
for i in range(ndims):
options.append(('dim{}'.format(i), i))
self.x_coord.options = options
if (cube.coords(axis='X', dim_coords=True) and
cube.coord(axis='X', dim_coords=True).name() in
[o[1] for o in self.x_coord.options]):
default = cube.coord(axis='X', dim_coords=True).name()
self.x_coord.value = default
self.y_coord.options = options
if (cube.coords(axis='Y', dim_coords=True) and
cube.coord(axis='Y', dim_coords=True).name() in
[o[1] for o in self.y_coord.options]):
default = cube.coord(axis='Y', dim_coords=True).name()
self.y_coord.value = default
def _handle_cmap(self, sender):
        # Check that the colour map string is valid; if not, flag it via the widget description.
from matplotlib.cm import cmap_d
cmap_string = self.cmap.value
if cmap_string and cmap_string in cmap_d.keys():
self.mpl_kwargs['cmap'] = cmap_string
self.cmap.description = 'colour map'
else:
self.cmap.description = 'not a cmap'
def _handle_plot_type(self, sender):
cmap = self.cmap.value
self.mpl_kwargs = {}
if cmap:
self.mpl_kwargs['cmap'] = cmap
@property
def box(self):
"""The IPywidgets box to display."""
return self._box
class Explorer(traitlets.HasTraits):
"""
IPyWidgets and workflow for exploring collections of cubes.
"""
_cubes = traitlets.List()
def __init__(self, url=''):
self.file_pickers = []
if url:
o = urlparse(url)
query = parse_qs(o.query)
pwd, = query.get('pwd', [''])
for fname in query.get('files', []):
self.file_pickers.append(FilePicker(pwd, os.path.join(pwd, fname)))
for fpath in query.get('folders', []):
self.file_pickers.append(FilePicker(fpath))
if not self.file_pickers:
self.file_pickers.append(FilePicker())
# Define load action.
self._load_button = ipywidgets.Button(description="load these files")
self._load_button.on_click(self._handle_load)
self._file_tab_button = ipywidgets.Button(description="add tab")
self._file_tab_button.on_click(self._handle_new_tab)
self._subplots = ipywidgets.RadioButtons(description='subplots',
options=[1, 2])
self._subplots.observe(self._handle_nplots, names='value')
# Plot action button.
self._plot_button = ipywidgets.Button(description="Plot my cube")
self._plot_button.on_click(self._goplot)
# Configure layout of the Explorer.
self._plot_container = ipywidgets.Box()
# Define a Tab container for the main controls in the browse interface.
children = [fp.box for fp in self.file_pickers]
self.ftabs = ipywidgets.Tab(children=children)
children = [self._load_button, self._file_tab_button]
self.bbox = ipywidgets.HBox(children=children)
children = [self.ftabs, self.bbox]
self._file_picker_tabs = ipywidgets.Box(children=children)
# Define the plot controls, start with 1 (self._subplots default)
self.plot_controls = [PlotControl()]
pcc_children = [pc.box for pc in self.plot_controls]
self._plot_control_container = ipywidgets.Tab(children=pcc_children)
self._plot_control_container.set_title(0, 'Plot Axes 0')
        # Define an Accordion for files, subplots and plots
acc_children = [self._file_picker_tabs, self._subplots,
self._plot_control_container]
self._accord = ipywidgets.Accordion(children=acc_children)
self._accord.set_title(0, 'Files')
self._accord.set_title(1, 'SubPlots')
self._accord.set_title(2, 'Plots')
# Initialise cubes container
self._cubes = []
# Display the browse interface.
IPython.display.display(self._accord)
IPython.display.display(self._plot_button)
IPython.display.display(self._plot_container)
@property
def mpl_kwargs(self):
"""
        The list of dictionaries of matplotlib keyword arguments in use
        by the PlotControls.
"""
return [pc.mpl_kwargs for pc in self.plot_controls]
@property
def cubes(self):
"""The list of cubes the explorer is currently working with."""
return self._cubes
@cubes.setter
def cubes(self, new_cubes):
"""To update the list of cubes the explorer is working with."""
self._cubes = new_cubes
@traitlets.observe('_cubes')
def update_cubes_list(self, change=None):
"""
Update the list of cubes available in the Explorer.
Assigning an updated list into `cubes` automatically runs this.
"""
# Build options list, using index values into the cube list.
# This avoids the loading of cube's data payload when the
# widget tests equality on selection.
options = [('{}: {}'.format(i, cube.summary(shorten=True)), i)
for i, cube in enumerate(self._cubes)]
for pc in self.plot_controls:
# Provide the cubes list to the cube_picker, to index into.
pc.cube_picker.cubes = self._cubes
pc.cube_picker.options = [('None', None)] + pc.cube_picker.options
pc.cube_picker.value = None
pc.cube_picker.options = [('None', None)] + options
if options:
pc.cube_picker.value = options[0][1]
pc.cube_picker.options = options
def _handle_load(self, sender):
"""Load button action."""
IPython.display.clear_output()
sender.description = 'loading......'
fpfs = [fp.files for fp in self.file_pickers]
        selected_files = [fname for files in fpfs for fname in files]
# Reassigning into self._cubes updates the cube_pickers.
self._cubes = iris.load(selected_files)
self._cubes = self._cubes.concatenate()
sender.description = 'files loaded, reload'
IPython.display.clear_output()
def _handle_new_tab(self, sender):
"""Add new file tab."""
self.file_pickers.append(FilePicker())
self._update_filepickers()
def _update_filepickers(self):
children = [fp.box for fp in self.file_pickers]
        for i, fp in enumerate(self.file_pickers):
            fp.deleter.index = i
            fp.deleter.on_click(self._handle_delete_tab)
self.ftabs = ipywidgets.Tab(children=children)
self._file_picker_tabs.children = [self.ftabs, self.bbox]
def _handle_delete_tab(self, sender):
"""remove a file tab"""
self.file_pickers.pop(sender.index)
self._update_filepickers()
def _handle_nplots(self, sender):
if self._subplots.value == 1:
self.plot_controls = [self.plot_controls[0]]
elif self._subplots.value == 2:
self.plot_controls = [self.plot_controls[0], PlotControl()]
pcc_children = [pc.box for pc in self.plot_controls]
self._plot_control_container.children = pcc_children
for i in range(self._subplots.value):
label = 'Plot Axes {}'.format(i)
self._plot_control_container.set_title(i, label)
self.update_cubes_list()
def _goplot(self, sender):
"""Create the cube_browser.Plot2D and cube_browser.Browser"""
IPython.display.clear_output()
fig = plt.figure(figsize=(16, 7))
sub_plots = 110
if self._subplots.value == 2:
sub_plots = 120
confs = []
for spl, pc in enumerate(self.plot_controls):
spl += 1
cube = None
if pc.cube_picker.value is not None:
cube = self.cubes[pc.cube_picker.value]
if cube and spl <= self._subplots.value:
pc_x_name = pc.x_coord.value
pc_y_name = pc.y_coord.value
x_coords = cube.coords(axis='X', dim_coords=True)
if len(x_coords) == 1:
x_name = x_coords[0].name()
else:
x_name = None
y_coords = cube.coords(axis='Y', dim_coords=True)
if len(y_coords) == 1:
y_name = y_coords[0].name()
else:
y_name = None
if x_name == pc_x_name and y_name == pc_y_name:
proj = iplt.default_projection(cube) or ccrs.PlateCarree()
ax = fig.add_subplot(sub_plots + spl, projection=proj)
# If the spatial extent is small, use high-res coastlines
extent = iplt.default_projection_extent(cube)
x0, y0 = ccrs.PlateCarree().transform_point(extent[0],
extent[2],
proj)
x1, y1 = ccrs.PlateCarree().transform_point(extent[1],
extent[3],
proj)
if x1-x0 < 20 and y1-y0 < 20:
ax.coastlines(resolution='10m')
elif x1-x0 < 180 and y1-y0 < 90:
ax.coastlines(resolution='50m')
else:
ax.coastlines()
else:
ax = plt.gca()
ax = fig.add_subplot(sub_plots+spl)
plot_type = pc.plot_type
coords = [pc_x_name, pc_y_name]
confs.append(plot_type.value(cube, ax, coords=coords,
**pc.mpl_kwargs))
title = cube.name().replace('_', ' ').capitalize()
ax.set_title(title)
self.browser = cube_browser.Browser(confs)
self.browser.on_change(None)
# For each PlotControl, assign the plot's mpl_kwargs back to
# that PlotControl.
for pc, plot in zip(self.plot_controls, confs):
pc.mpl_kwargs = plot.kwargs
self._plot_container.children = [self.browser.form]
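def _example_explorer_session():
    """
    A rough usage sketch, not part of the original module: it assumes a
    Jupyter notebook with ipywidgets enabled, and the query-string URL below
    is hypothetical, serving only to illustrate the ``pwd``/``files``
    parameters that ``Explorer.__init__`` parses.
    """
    url = ('http://localhost:8888/notebooks/browse.ipynb'
           '?pwd=/data/cubes&files=air_temp.pp')
    explorer = Explorer(url)
    # The widgets are displayed on construction; loading files and pressing
    # "Plot my cube" then populates ``explorer.cubes`` and ``explorer.browser``.
    return explorer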
| bsd-3-clause |
meduz/scikit-learn | benchmarks/bench_plot_lasso_path.py | 84 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
h-mayorquin/competitive_and_selective_learning | play.py | 1 | 1250 | """
This is the play
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from functions import selection_algorithm, scl
from csl import CSL
plot = True
verbose = False
tracking = True
selection = False
# Generate the data
n_samples = 1500
random_state = 20 # Does not converge
random_state = 41
random_state = 105 # Does not converge
random_state = 325325
random_state = 1111
n_features = 2
centers = 7
X, y = make_blobs(n_samples, n_features, centers, random_state=random_state)
# The algorithm
N = centers
s = 2 # Number of neurons to change per round
eta = 0.1
T = 100
csl = CSL(n_clusters=N, n_iter=T, tol=0.001, eta=eta, s0=s, random_state=np.random)
csl.fit(X)
neurons = csl.centers_
if False:
kmeans = KMeans(n_clusters=N)
kmeans.fit(X)
neurons = kmeans.cluster_centers_
if plot:
# Visualize X
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(X[:, 0], X[:, 1], 'x', markersize=6)
    # ax.hold(True)  # not needed: recent matplotlib holds artists by default
if True:
for n in range(N):
ax.plot(neurons[n, 0], neurons[n, 1], 'o', markersize=12, label='neuron ' + str(n))
ax.legend()
# fig.show()
plt.show()
| mit |
kagayakidan/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
UltronAI/Deep-Learning | Pattern-Recognition/hw2-Feature-Selection/skfeature/function/wrapper/svm_backward.py | 1 | 1775 | import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
def svm_backward(X, y, n_selected_features):
"""
This function implements the backward feature selection algorithm based on SVM
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
y: {numpy array}, shape (n_samples,)
input class labels
n_selected_features: {int}
number of selected features
Output
------
F: {numpy array}, shape (n_features, )
index of selected features
"""
n_samples, n_features = X.shape
# using 10 fold cross validation
    cv = KFold(n_splits=10, shuffle=True)
# choose SVM as the classifier
clf = SVC()
# selected feature set, initialized to contain all features
    F = list(range(n_features))
count = n_features
while count > n_selected_features:
max_acc = 0
for i in range(n_features):
if i in F:
F.remove(i)
X_tmp = X[:, F]
acc = 0
                for train, test in cv.split(X_tmp):
clf.fit(X_tmp[train], y[train])
y_predict = clf.predict(X_tmp[test])
acc_tmp = accuracy_score(y[test], y_predict)
acc += acc_tmp
acc = float(acc)/10
F.append(i)
# record the feature which results in the largest accuracy
if acc > max_acc:
max_acc = acc
idx = i
# delete the feature which results in the largest accuracy
F.remove(idx)
count -= 1
return np.array(F)
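# Hedged usage sketch (added for illustration; not part of the original
# repository). It relies on the sklearn API fixes above and on an arbitrary
# synthetic dataset: X_demo, y_demo and the feature counts are assumptions.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    # small classification problem: 8 features, 3 of them informative
    X_demo, y_demo = make_classification(n_samples=100, n_features=8,
                                         n_informative=3, random_state=0)
    # keep the 3 features that survive the backward elimination
    print("retained feature indices:", svm_backward(X_demo, y_demo, 3))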
| mit |
musically-ut/statsmodels | statsmodels/graphics/tests/test_regressionplots.py | 20 | 9978 | import numpy as np
import statsmodels.api as sm
from numpy.testing import dec
from statsmodels.graphics.regressionplots import (plot_fit, plot_ccpr,
plot_partregress, plot_regress_exog, abline_plot,
plot_partregress_grid, plot_ccpr_grid, add_lowess,
plot_added_variable, plot_partial_residuals,
plot_ceres_residuals)
from pandas import Series, DataFrame
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except Exception:
have_matplotlib = False
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_regressionplots.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
plt.close(fig)
@dec.skipif(not have_matplotlib)
def teardown_module():
plt.close('all')
if pdf_output:
pdf.close()
class TestPlot(object):
def __init__(self):
self.setup() #temp: for testing without nose
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
res = sm.OLS(y, exog0).fit()
self.res = res
@dec.skipif(not have_matplotlib)
def test_plot_fit(self):
res = self.res
fig = plot_fit(res, 0, y_true=None)
x0 = res.model.exog[:, 0]
yf = res.fittedvalues
y = res.model.endog
px1, px2 = fig.axes[0].get_lines()[0].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(y, px2)
px1, px2 = fig.axes[0].get_lines()[1].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(yf, px2)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_plot_oth(self):
#just test that they run
res = self.res
plot_fit(res, 0, y_true=None)
plot_partregress_grid(res, exog_idx=[0,1])
plot_regress_exog(res, exog_idx=0)
plot_ccpr(res, exog_idx=0)
plot_ccpr_grid(res, exog_idx=[0])
fig = plot_ccpr_grid(res, exog_idx=[0,1])
for ax in fig.axes:
add_lowess(ax)
close_or_save(pdf, fig)
class TestPlotPandas(TestPlot):
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
exog0 = DataFrame(exog0, columns=["const", "var1", "var2"])
y = Series(y, name="outcome")
res = sm.OLS(y, exog0).fit()
self.res = res
data = DataFrame(exog0, columns=["const", "var1", "var2"])
data['y'] = y
self.data = data
class TestPlotFormula(TestPlotPandas):
@dec.skipif(not have_matplotlib)
def test_one_column_exog(self):
from statsmodels.formula.api import ols
res = ols("y~var1-1", data=self.data).fit()
plot_regress_exog(res, "var1")
res = ols("y~var1", data=self.data).fit()
plot_regress_exog(res, "var1")
class TestABLine(object):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
mod = sm.OLS(y,X).fit()
cls.X = X
cls.y = y
cls.mod = mod
@dec.skipif(not have_matplotlib)
def test_abline_model(self):
fig = abline_plot(model_results=self.mod)
ax = fig.axes[0]
ax.scatter(self.X[:,1], self.y)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_model_ax(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(model_results=self.mod, ax=ax)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_ab(self):
mod = self.mod
intercept, slope = mod.params
fig = abline_plot(intercept=intercept, slope=slope)
close_or_save(pdf, fig)
@dec.skipif(not have_matplotlib)
def test_abline_ab_ax(self):
mod = self.mod
intercept, slope = mod.params
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(intercept=intercept, slope=slope, ax=ax)
close_or_save(pdf, fig)
class TestABLinePandas(TestABLine):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
cls.X = X
cls.y = y
X = DataFrame(X, columns=["const", "someX"])
y = Series(y, name="outcome")
mod = sm.OLS(y,X).fit()
cls.mod = mod
class TestAddedVariablePlot(object):
@dec.skipif(not have_matplotlib)
def test_added_variable_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
lin_pred = 4 + exog[:, 0] + 0.2*exog[:, 1]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 0, 1, 2:
for use_glm_weights in False, True:
for resid_type in "resid_deviance", "resid_response":
weight_str = ["Unweighted", "Weighted"][use_glm_weights]
# Run directly and called as a results method.
for j in 0,1:
if j == 0:
fig = plot_added_variable(results, focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot"
else:
fig = results.plot_added_variable(focus_col,
use_glm_weights=use_glm_weights,
resid_type=resid_type)
ti = "Added variable plot (called as method)"
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.7])
effect_str = ["Linear effect, slope=1",
"Quadratic effect", "No effect"][focus_col]
ti += "\nPoisson regression\n"
ti += effect_str + "\n"
ti += weight_str + "\n"
ti += "Using '%s' residuals" % resid_type
ax.set_title(ti)
close_or_save(pdf, fig)
class TestPartialResidualPlot(object):
@dec.skipif(not have_matplotlib)
def test_partial_residual_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0,1:
if j == 0:
fig = plot_partial_residuals(results, focus_col)
else:
fig = results.plot_partial_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "Partial residual plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
class TestCERESPlot(object):
@dec.skipif(not have_matplotlib)
def test_ceres_poisson(self):
np.random.seed(3446)
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
lin_pred = 4 + exog[:, 1] + 0.2*exog[:, 2]**2
expval = np.exp(lin_pred)
endog = np.random.poisson(expval)
model = sm.GLM(endog, exog, family=sm.families.Poisson())
results = model.fit()
for focus_col in 1, 2:
for j in 0, 1:
if j == 0:
fig = plot_ceres_residuals(results, focus_col)
else:
fig = results.plot_ceres_residuals(focus_col)
ax = fig.get_axes()[0]
add_lowess(ax)
ax.set_position([0.1, 0.1, 0.8, 0.77])
effect_str = ["Intercept", "Linear effect, slope=1",
"Quadratic effect"][focus_col]
ti = "CERES plot"
if j == 1:
ti += " (called as method)"
ax.set_title(ti + "\nPoisson regression\n" +
effect_str)
close_or_save(pdf, fig)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'], exit=False)
| bsd-3-clause |
NeuralEnsemble/elephant | elephant/asset/asset.py | 2 | 102992 | # -*- coding: utf-8 -*-
"""
ASSET is a statistical method :cite:`asset-Torre16_e1004939` for the detection
of repeating sequences of synchronous spiking events in parallel spike trains.
ASSET analysis class for finding patterns
-----------------------------------------------
.. autosummary::
:toctree: _toctree/asset/
ASSET
Patterns post-exploration
-------------------------
.. autosummary::
:toctree: _toctree/asset/
synchronous_events_intersection
synchronous_events_difference
synchronous_events_identical
synchronous_events_no_overlap
synchronous_events_contained_in
synchronous_events_contains_all
synchronous_events_overlap
Tutorial
--------
:doc:`View tutorial <../tutorials/asset>`
Run tutorial interactively:
.. image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master
?filepath=doc/tutorials/asset.ipynb
Examples
--------
In this example we
* simulate two noisy synfire chains;
* shuffle the neurons to destroy visual appearance;
* run ASSET analysis to recover the original neurons arrangement.
1. Simulate two noisy synfire chains, shuffle the neurons to destroy the
pattern visually, and store shuffled activations in neo.SpikeTrains.
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> np.random.seed(10)
>>> spiketrain = np.linspace(0, 50, num=10)
>>> np.random.shuffle(spiketrain)
>>> spiketrains = np.c_[spiketrain, spiketrain + 100]
>>> spiketrains += np.random.random_sample(spiketrains.shape) * 5
>>> spiketrains = [neo.SpikeTrain(st, units='ms', t_stop=1 * pq.s)
... for st in spiketrains]
2. Create `ASSET` class object that holds spike trains.
`ASSET` requires at least one argument - a list of spike trains. If
`spiketrains_y` is not provided, the same spike trains are used to build an
intersection matrix with.
>>> from elephant import asset
>>> asset_obj = asset.ASSET(spiketrains, bin_size=3*pq.ms)
3. Build the intersection matrix `imat`:
>>> imat = asset_obj.intersection_matrix()
4. Estimate the probability matrix `pmat`, using the analytical method:
>>> pmat = asset_obj.probability_matrix_analytical(imat,
... kernel_width=50*pq.ms)
5. Compute the joint probability matrix `jmat`, using a suitable filter:
>>> jmat = asset_obj.joint_probability_matrix(pmat, filter_shape=(5, 1),
... n_largest=3)
6. Create the masked version of the intersection matrix, `mmat`, from `pmat`
and `jmat`:
>>> mmat = asset_obj.mask_matrices([pmat, jmat], thresholds=.9)
7. Cluster significant elements of imat into diagonal structures:
>>> cmat = asset_obj.cluster_matrix_entries(mmat, max_distance=11,
... min_neighbors=3, stretch=5)
8. Extract sequences of synchronous events:
>>> sses = asset_obj.extract_synchronous_events(cmat)
The ASSET found the following sequences of synchronous events:
>>> sses
{1: {(36, 2): {5},
(37, 4): {1},
(40, 6): {4},
(41, 7): {8},
(43, 9): {2},
(47, 14): {7},
(48, 15): {0},
(50, 17): {9}}}
To visualize them, refer to Viziphant documentation and an example plot
:func:`viziphant.asset.plot_synchronous_events`.
"""
from __future__ import division, print_function, unicode_literals
import math
import os
import subprocess
import sys
import tempfile
import warnings
from pathlib import Path
import neo
import numpy as np
import quantities as pq
import scipy.spatial
import scipy.stats
from sklearn.cluster import dbscan
from sklearn.metrics import pairwise_distances, pairwise_distances_chunked
from tqdm import trange, tqdm
import elephant.conversion as conv
from elephant import spike_train_surrogates
from elephant.utils import get_cuda_capability_major
try:
from mpi4py import MPI
mpi_accelerated = True
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
except ImportError:
mpi_accelerated = False
size = 1
rank = 0
__all__ = [
"ASSET",
"synchronous_events_intersection",
"synchronous_events_difference",
"synchronous_events_identical",
"synchronous_events_no_overlap",
"synchronous_events_contained_in",
"synchronous_events_contains_all",
"synchronous_events_overlap"
]
# =============================================================================
# Some Utility Functions to be dealt with in some way or another
# =============================================================================
def _signals_same_attribute(signals, attr_name):
"""
Check whether a list of signals (`neo.AnalogSignal` or `neo.SpikeTrain`)
have same attribute `attr_name`. If so, return that value. Otherwise,
raise ValueError.
Parameters
----------
signals : list
A list of signals (e.g. `neo.AnalogSignal` or `neo.SpikeTrain`) having
attribute `attr_name`.
Returns
-------
pq.Quantity
The value of the common attribute `attr_name` of the list of signals.
Raises
------
ValueError
If `signals` is an empty list.
If `signals` have different `attr_name` attribute values.
"""
if len(signals) == 0:
raise ValueError('Empty signals list')
attribute = getattr(signals[0], attr_name)
for sig in signals[1:]:
if getattr(sig, attr_name) != attribute:
raise ValueError(
"Signals have different '{}' values".format(attr_name))
return attribute
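# Hedged example (illustration only, not part of the original module): the
# helper below shows how _signals_same_attribute extracts an attribute shared
# by all signals; the toy spike trains are assumptions.
def _demo_signals_same_attribute():
    sts = [neo.SpikeTrain([1, 2] * pq.ms, t_stop=10 * pq.ms),
           neo.SpikeTrain([3] * pq.ms, t_stop=10 * pq.ms)]
    # both trains share t_stop=10 ms, so that common value is returned;
    # trains with different t_stop values would raise ValueError instead
    return _signals_same_attribute(sts, 't_stop')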
def _quantities_almost_equal(x, y):
"""
Returns True if two quantities are almost equal, i.e., if `x - y` is
"very close to 0" (not larger than machine precision for floats).
Parameters
----------
x : pq.Quantity
First Quantity to compare.
y : pq.Quantity
Second Quantity to compare. Must have same unit type as `x`, but not
necessarily the same shape. Any shapes of `x` and `y` for which `x - y`
can be calculated are permitted.
Returns
-------
np.ndarray
Array of `bool`, which is True at any position where `x - y` is almost
zero.
Notes
-----
Not the same as `numpy.testing.assert_allclose` (which does not work
with Quantities) and `numpy.testing.assert_almost_equal` (which works only
with decimals)
"""
eps = np.finfo(float).eps
relative_diff = (x - y).magnitude
return np.all([-eps <= relative_diff, relative_diff <= eps], axis=0)
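# Hedged example (illustration only, not part of the original module): two
# quantities whose difference is below machine precision compare as "almost
# equal". The numeric values are assumptions.
def _demo_quantities_almost_equal():
    x = 1.0 * pq.ms
    y = (1.0 + 1e-20) * pq.ms
    # 1e-20 is lost to floating-point rounding, so x - y is exactly 0
    return _quantities_almost_equal(x, y)  # expected: True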
def _transactions(spiketrains, bin_size, t_start, t_stop, ids=None):
"""
Transform parallel spike trains into a list of sublists, called
transactions, each corresponding to a time bin and containing the list
of spikes in `spiketrains` falling into that bin.
To compute each transaction, the spike trains are binned (with adjacent
exclusive binning) and clipped (i.e., spikes from the same train falling
in the same bin are counted as one event). The list of spike IDs within
each bin form the corresponding transaction.
Parameters
----------
spiketrains : list of neo.SpikeTrain or list of tuple
A list of `neo.SpikeTrain` objects, or list of pairs
(Train_ID, `neo.SpikeTrain`), where `Train_ID` can be any hashable
object.
bin_size : pq.Quantity
Width of each time bin. Time is binned to determine synchrony.
t_start : pq.Quantity
The starting time. Only spikes occurring at times `t >= t_start` are
considered. The first transaction contains spikes falling into the
time segment `[t_start, t_start+bin_size]`.
If None, takes the value of `spiketrain.t_start`, common for all
input `spiketrains` (raises ValueError if it's not the case).
Default: None
t_stop : pq.Quantity
The ending time. Only spikes occurring at times `t < t_stop` are
considered.
If None, takes the value of `spiketrain.t_stop`, common for all
input `spiketrains` (raises ValueError if it's not the case).
Default: None
ids : list of int, optional
List of spike train IDs.
If None, the IDs `0` to `N-1` are used, where `N` is the number of
input spike trains.
Default: None
Returns
-------
list of list
A list of transactions, where each transaction corresponds to a time
bin and represents the list of spike train IDs having a spike in that
time bin.
Raises
------
TypeError
If `spiketrains` is not a list of `neo.SpikeTrain` or a list of tuples
(id, `neo.SpikeTrain`).
"""
if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):
trains = spiketrains
if ids is None:
ids = range(len(spiketrains))
else:
# (id, SpikeTrain) pairs
try:
ids, trains = zip(*spiketrains)
except TypeError:
raise TypeError('spiketrains must be either a list of ' +
'SpikeTrains or a list of (id, SpikeTrain) pairs')
# Bin the spike trains and take for each of them the ids of filled bins
binned = conv.BinnedSpikeTrain(
trains, bin_size=bin_size, t_start=t_start, t_stop=t_stop)
filled_bins = binned.spike_indices
# Compute and return the transaction list
return [[train_id for train_id, b in zip(ids, filled_bins)
if bin_id in b] for bin_id in range(binned.n_bins)]
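# Hedged example (illustration only, not part of the original module): two toy
# spike trains binned at 5 ms yield one transaction per bin, i.e. the list of
# train IDs that spiked in that bin. All values below are assumptions.
def _demo_transactions():
    sts = [neo.SpikeTrain([1, 6, 11] * pq.ms, t_stop=20 * pq.ms),
           neo.SpikeTrain([2, 12] * pq.ms, t_stop=20 * pq.ms)]
    # with bins [0, 5), [5, 10), [10, 15), [15, 20) ms this should give
    # [[0, 1], [0], [0, 1], []]
    return _transactions(sts, bin_size=5 * pq.ms,
                         t_start=0 * pq.ms, t_stop=20 * pq.ms)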
def _analog_signal_step_interp(signal, times):
"""
Compute the step-wise interpolation of a signal at desired times.
Given a signal (e.g. a `neo.AnalogSignal`) `s` taking values `s[t0]` and
`s[t1]` at two consecutive time points `t0` and `t1` (`t0 < t1`), the value
of the step-wise interpolation at time `t: t0 <= t < t1` is given by
`s[t] = s[t0]`.
Parameters
----------
signal : neo.AnalogSignal
The analog signal, containing the discretization of the function to
interpolate.
times : pq.Quantity
A vector of time points at which the step interpolation is computed.
Returns
-------
pq.Quantity
Object with same shape of `times` and containing
the values of the interpolated signal at the time points in `times`.
"""
dt = signal.sampling_period
# Compute the ids of the signal times to the left of each time in times
time_ids = np.floor(
((times - signal.t_start) / dt).rescale(
pq.dimensionless).magnitude).astype('i')
return (signal.magnitude[time_ids] * signal.units).rescale(signal.units)
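# Hedged example (illustration only, not part of the original module):
# step-wise interpolation of a signal sampled at 1 Hz; the sample immediately
# to the left of each requested time is returned. The values are assumptions.
def _demo_analog_signal_step_interp():
    sig = neo.AnalogSignal([0., 1., 2., 3.], units='Hz',
                           sampling_rate=1 * pq.Hz, t_start=0 * pq.s)
    # at t=0.5 s the value is the sample at 0 s; at t=1.9 s the sample at 1 s
    return _analog_signal_step_interp(sig, [0.5, 1.9] * pq.s)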
# =============================================================================
# HERE ASSET STARTS
# =============================================================================
def _stretched_metric_2d(x, y, stretch, ref_angle, working_memory=None):
r"""
Given a list of points on the real plane, identified by their abscissa `x`
and ordinate `y`, compute a stretched transformation of the Euclidean
distance among each of them.
The classical euclidean distance `d` between points `(x1, y1)` and
    `(x2, y2)`, i.e., :math:`\sqrt{(x_1-x_2)^2 + (y_1-y_2)^2}`, is multiplied by a
factor
.. math::
        1 + (stretch - 1) \cdot \left|\sin(ref\_angle - \theta)\right|,
where :math:`\theta` is the angle between the points and the 45 degree
direction (i.e., the line `y = x`).
The stretching factor thus steadily varies between 1 (if the line
connecting `(x1, y1)` and `(x2, y2)` has inclination `ref_angle`) and
`stretch` (if that line has inclination `90 + ref_angle`).
Parameters
----------
x : (n,) np.ndarray
Array of abscissas of all points among which to compute the distance.
y : (n,) np.ndarray
Array of ordinates of all points among which to compute the distance
(same shape as `x`).
stretch : float
Maximum stretching factor, applied if the line connecting the points
has inclination `90 + ref_angle`.
ref_angle : float
Reference angle in degrees (i.e., the inclination along which the
stretching factor is 1).
Returns
-------
D : (n,n) np.ndarray
Square matrix of distances between all pairs of points.
"""
alpha = np.deg2rad(ref_angle) # reference angle in radians
# Create the array of points (one per row) for which to compute the
# stretched distance
points = np.column_stack([x, y])
x_array = np.expand_dims(x, axis=0)
y_array = np.expand_dims(y, axis=0)
def calculate_stretch_mat(theta_mat, D_mat):
# Transform [-pi, pi] back to [-pi/2, pi/2]
theta_mat[theta_mat < -np.pi / 2] += np.pi
theta_mat[theta_mat > np.pi / 2] -= np.pi
# Compute the matrix of stretching factors for each pair of points.
# Equivalent to:
# stretch_mat = 1 + (stretch - 1.) * np.abs(np.sin(alpha - theta))
_stretch_mat = np.subtract(alpha, theta_mat, out=theta_mat)
_stretch_mat = np.sin(_stretch_mat, out=_stretch_mat)
_stretch_mat = np.abs(_stretch_mat, out=_stretch_mat)
_stretch_mat = np.multiply(stretch - 1, _stretch_mat, out=_stretch_mat)
_stretch_mat = np.add(1, _stretch_mat, out=_stretch_mat)
_stretch_mat = np.multiply(D_mat, _stretch_mat, out=_stretch_mat)
return _stretch_mat
if working_memory is None:
# Compute the matrix D[i, j] of euclidean distances among points
# i and j
D = pairwise_distances(points)
# Compute the angular coefficients of the line between each pair of
# points
# dX[i,j]: x difference between points i and j
# dY[i,j]: y difference between points i and j
dX = x_array.T - x_array
dY = y_array.T - y_array
# Compute the matrix Theta of angles between each pair of points
theta = np.arctan2(dY, dX, dtype=np.float32)
stretch_mat = calculate_stretch_mat(theta, D)
else:
start = 0
# x and y sizes are the same
stretch_mat = np.empty((len(x), len(y)), dtype=np.float32)
for D_chunk in pairwise_distances_chunked(
points, working_memory=working_memory):
chunk_size = D_chunk.shape[0]
dX = x_array[:, start: start + chunk_size].T - x_array
dY = y_array[:, start: start + chunk_size].T - y_array
theta_chunk = np.arctan2(
dY, dX, out=stretch_mat[start: start + chunk_size, :])
# stretch_mat (theta_chunk) is updated in-place here
calculate_stretch_mat(theta_chunk, D_chunk)
start += chunk_size
# Return the stretched distance matrix
return stretch_mat
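# Hedged worked example (illustration only, not part of the original module):
# for points (0, 0), (1, 1) and (-1, 1) with stretch=2 and ref_angle=45, the
# pair (0, 0)-(1, 1) lies along the 45-degree direction, so its distance stays
# the Euclidean sqrt(2); the pair (0, 0)-(-1, 1) is perpendicular to that
# direction, so its distance is multiplied by the full stretch factor,
# giving 2 * sqrt(2).
def _demo_stretched_metric_2d():
    x = np.array([0., 1., -1.])
    y = np.array([0., 1., 1.])
    return _stretched_metric_2d(x, y, stretch=2, ref_angle=45)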
def _interpolate_signals(signals, sampling_times, verbose=False):
"""
Interpolate signals at given sampling times.
"""
# Reshape all signals to one-dimensional array object (e.g. AnalogSignal)
for i, signal in enumerate(signals):
if signal.ndim == 2:
signals[i] = signal.flatten()
elif signal.ndim > 2:
            raise ValueError('elements in fir_rates must have at most 2 dimensions')
if verbose:
print('create time slices of the rates...')
# Interpolate in the time bins
interpolated_signal = np.vstack([_analog_signal_step_interp(
signal, sampling_times).rescale('Hz').magnitude
for signal in signals]) * pq.Hz
return interpolated_signal
class _GPUBackend:
"""
Parameters
----------
max_chunk_size: int or None, optional
Defines the maximum chunk size used in the `_split_axis` function. The
users typically don't need to set this parameter manually - it's used
to simulate scenarios when the input matrix is so large that it cannot
fit into GPU memory. Setting this parameter manually can resolve GPU
memory errors in case automatic parameters adjustment fails.
Notes
-----
1. PyOpenCL backend takes some time to compile the kernel for the first
time - the caching will affect your benchmarks unless you run each
program twice.
2. Pinned Host Memory.
Host (CPU) data allocations are pageable by default. The GPU cannot
access data directly from pageable host memory, so when a data transfer
from pageable host memory to device memory is invoked, the CUDA driver
must first allocate a temporary page-locked, or "pinned", host array,
copy the host data to the pinned array, and then transfer the data from
the pinned array to device memory, as illustrated at
https://developer.nvidia.com/blog/how-optimize-data-transfers-cuda-cc/
Same for OpenCL. Therefore, Python memory analyzers show increments in
the used RAM each time an OpenCL/CUDA buffer is created. As with any
Python objects, PyOpenCL and PyCUDA clean up and free allocated memory
automatically when garbage collection is executed.
"""
def __init__(self, max_chunk_size=None):
self.max_chunk_size = max_chunk_size
def _choose_backend(self):
# If CUDA is detected, always use CUDA.
# If OpenCL is detected, don't use it by default to avoid the system
# becoming unresponsive until the program terminates.
use_cuda = int(os.getenv("ELEPHANT_USE_CUDA", '1'))
use_opencl = int(os.getenv("ELEPHANT_USE_OPENCL", '1'))
cuda_detected = get_cuda_capability_major() != 0
if use_cuda and cuda_detected:
return self.pycuda
if use_opencl:
return self.pyopencl
return self.cpu
def _split_axis(self, chunk_size, axis_size, min_chunk_size=None):
chunk_size = min(chunk_size, axis_size)
if self.max_chunk_size is not None:
chunk_size = min(chunk_size, self.max_chunk_size)
if min_chunk_size is not None and chunk_size < min_chunk_size:
raise ValueError(f"[GPU not enough memory] Impossible to split "
f"the array into chunks of size at least "
f"{min_chunk_size} to fit into GPU memory")
n_chunks = math.ceil(axis_size / chunk_size)
chunk_size = math.ceil(axis_size / n_chunks) # align in size
if min_chunk_size is not None:
chunk_size = max(chunk_size, min_chunk_size)
split_idx = list(range(0, axis_size, chunk_size))
last_id = split_idx[-1]
last_size = axis_size - last_id # last is the smallest
split_idx = list(zip(split_idx[:-1], split_idx[1:]))
if min_chunk_size is not None and last_size < min_chunk_size:
# Overlap the last chunk with the previous.
# The overlapped part (intersection) will be computed twice.
last_id = axis_size - min_chunk_size
split_idx.append((last_id, axis_size))
return chunk_size, split_idx
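# Hedged example (illustration only, not part of the original module):
# _split_axis caps the chunk size at max_chunk_size, re-aligns it so that all
# chunks have similar sizes, and returns the (start, stop) index pairs that
# together cover the axis. The sizes below are arbitrary assumptions.
def _demo_split_axis():
    backend = _GPUBackend(max_chunk_size=300)
    # an axis of 1000 elements with a requested chunk of 400 is capped at 300
    # and then re-aligned, producing similarly sized index ranges over [0, 1000)
    return backend._split_axis(chunk_size=400, axis_size=1000)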
class _JSFUniformOrderStat3D(_GPUBackend):
def __init__(self, n, d, precision='float', verbose=False,
cuda_threads=64, cuda_cwr_loops=32, tolerance=1e-5,
max_chunk_size=None):
super().__init__(max_chunk_size=max_chunk_size)
if d > n:
            raise ValueError(f"d ({d}) must be less than or equal to n ({n})")
self.n = n
self.d = d
self.precision = precision
self.verbose = verbose and rank == 0
self.cuda_threads = cuda_threads
self.cuda_cwr_loops = cuda_cwr_loops
self.map_iterations = self._create_iteration_table()
bits = 32 if precision == "float" else 64
self.dtype = np.dtype(f"float{bits}")
self.tolerance = tolerance
@property
def num_iterations(self):
# map_iterations table is populated with element indices, not counts;
# therefore, we add 1
return self.map_iterations[:, -1].sum() + 1
def _create_iteration_table(self):
# do not use numpy arrays - they are limited to uint64
map_iterations = [list(range(self.n))]
for row_id in range(1, self.d):
prev_row = map_iterations[row_id - 1]
curr_row = [0] * (row_id + 1)
for col_id in range(row_id + 1, self.n):
cumsum = prev_row[col_id] + curr_row[-1]
curr_row.append(cumsum)
map_iterations.append(curr_row)
# here we can wrap the resulting array in numpy:
# if at least one item is greater than 2<<63 - 1,
# the data type will be set to 'object'
map_iterations = np.vstack(map_iterations)
return map_iterations
def _combinations_with_replacement(self):
# Generate sequences of {a_i} such that
# a_0 >= a_1 >= ... >= a_(d-1) and
# d-i <= a_i <= n, for each i in [0, d-1].
#
# Almost equivalent to
# list(itertools.combinations_with_replacement(range(n, 0, -1), r=d))
# [::-1]
#
# Example:
# _combinations_with_replacement(n=13, d=3) -->
# (3, 2, 1), (3, 2, 2), (3, 3, 1), ... , (13, 13, 12), (13, 13, 13).
#
# The implementation follows the insertion sort algorithm:
# insert a new element a_i from right to left to keep the reverse
# sorted order. Now substitute increment operation for insert.
if self.d > self.n:
return
if self.d == 1:
for matrix_entry in range(1, self.n + 1):
yield (matrix_entry,)
return
sequence_sorted = list(range(self.d, 0, -1))
input_order = tuple(sequence_sorted) # fixed
while sequence_sorted[0] != self.n + 1:
for last_element in range(1, sequence_sorted[-2] + 1):
sequence_sorted[-1] = last_element
yield tuple(sequence_sorted)
increment_id = self.d - 2
while increment_id > 0 and sequence_sorted[increment_id - 1] == \
sequence_sorted[increment_id]:
increment_id -= 1
sequence_sorted[increment_id + 1:] = input_order[increment_id + 1:]
sequence_sorted[increment_id] += 1
def cpu(self, log_du):
log_1 = np.log(1.)
# Compute the log of the integral's coefficient
logK = np.sum(np.log(np.arange(1, self.n + 1)))
# Add to the 3D matrix u a bottom layer equal to 0 and a
# top layer equal to 1. Then compute the difference du along
# the first dimension.
# prepare arrays for usage inside the loop
di_scratch = np.empty_like(log_du, dtype=np.int32)
log_du_scratch = np.empty_like(log_du)
# precompute log(factorial)s
# pad with a zero to get 0! = 1
log_factorial = np.hstack((0, np.cumsum(np.log(range(1, self.n + 1)))))
# compute the probabilities for each unique row of du
# only loop over the indices and do all du entries at once
# using matrix algebra
# initialise probabilities to 0
P_total = np.zeros(
log_du.shape[0],
dtype=np.float32 if self.precision == 'float' else np.float64
)
for iter_id, matrix_entries in enumerate(
tqdm(self._combinations_with_replacement(),
total=self.num_iterations,
desc="Joint survival function",
disable=not self.verbose)):
# if we are running with MPI
if mpi_accelerated and iter_id % size != rank:
continue
# we only need the differences of the indices:
di = -np.diff((self.n,) + matrix_entries + (0,))
# reshape the matrix to be compatible with du
di_scratch[:, range(len(di))] = di
# use precomputed factorials
sum_log_di_factorial = log_factorial[di].sum()
# Compute for each i,j the contribution to the probability
# given by this step, and add it to the total probability
# Use precomputed log
np.copyto(log_du_scratch, log_du)
# for each a=0,1,...,A-1 and b=0,1,...,B-1, replace du with 1
# whenever di_scratch = 0, so that du ** di_scratch = 1 (this
# avoids nans when both du and di_scratch are 0, and is
# mathematically correct)
log_du_scratch[di_scratch == 0] = log_1
di_log_du = di_scratch * log_du_scratch
sum_di_log_du = di_log_du.sum(axis=1)
logP = sum_di_log_du - sum_log_di_factorial
P_total += np.exp(logP + logK)
if mpi_accelerated:
totals = np.zeros_like(P_total)
# exchange all the results
mpi_float_type = MPI.FLOAT \
if self.precision == 'float' else MPI.DOUBLE
comm.Allreduce(
[P_total, mpi_float_type],
[totals, mpi_float_type],
op=MPI.SUM)
# We need to return the collected totals instead of the local
# P_total
P_total = totals
return P_total
def _compile_template(self, template_name, **kwargs):
from jinja2 import Template
cu_template_path = Path(__file__).parent / template_name
cu_template = Template(cu_template_path.read_text())
asset_cu = cu_template.render(
precision=self.precision,
CWR_LOOPS=self.cuda_cwr_loops,
N=self.n, D=self.d, **kwargs)
return asset_cu
def pyopencl(self, log_du, device_id=0):
import pyopencl as cl
import pyopencl.array as cl_array
self._check_input(log_du)
it_todo = self.num_iterations
u_length = log_du.shape[0]
context = cl.create_some_context(interactive=False)
if self.verbose:
print("Available OpenCL devices:\n", context.devices)
device = context.devices[device_id]
# A queue bounded to the device
queue = cl.CommandQueue(context)
max_l_block = device.local_mem_size // (
self.dtype.itemsize * (self.d + 2))
n_threads = min(self.cuda_threads, max_l_block,
device.max_work_group_size)
if n_threads > 32:
# It's more efficient to make the number of threads
# a multiple of the warp size (32).
n_threads -= n_threads % 32
iteration_table_str = ", ".join(f"{val}LU" for val in
self.map_iterations.flatten())
iteration_table_str = "{%s}" % iteration_table_str
log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))]
logK = log_factorial[-1]
log_factorial_str = ", ".join(f"{val:.10f}" for val in log_factorial)
log_factorial_str = "{%s}" % log_factorial_str
atomic_int = 'int' if self.precision == 'float' else 'long'
# GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default
mem_avail = min(device.max_mem_alloc_size, device.global_mem_size,
1 << 31)
# 4 * (D + 1) * size + 8 * size == mem_avail
chunk_size = mem_avail // (4 * log_du.shape[1] + self.dtype.itemsize)
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=u_length)
P_total = np.empty(u_length, dtype=self.dtype)
P_total_gpu = cl_array.Array(queue, shape=chunk_size, dtype=self.dtype)
for i_start, i_end in split_idx:
log_du_gpu = cl_array.to_device(queue, log_du[i_start: i_end],
async_=True)
P_total_gpu.fill(0, queue=queue)
chunk_size = i_end - i_start
l_block = min(n_threads, chunk_size)
l_num_blocks = math.ceil(chunk_size / l_block)
grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops))
if grid_size > l_num_blocks:
# make grid_size divisible by l_num_blocks
grid_size -= grid_size % l_num_blocks
else:
# grid_size must be at least l_num_blocks
grid_size = l_num_blocks
if self.verbose:
print(f"[Joint prob. matrix] it_todo={it_todo}, "
f"grid_size={grid_size}, L_BLOCK={l_block}, "
f"N_THREADS={n_threads}")
# OpenCL defines unsigned long as uint64, therefore we're adding
# the LU suffix, not LLU, which would indicate unsupported uint128
# data type format.
asset_cl = self._compile_template(
template_name="joint_pmat.cl",
L=f"{chunk_size}LU",
L_BLOCK=l_block,
L_NUM_BLOCKS=l_num_blocks,
ITERATIONS_TODO=f"{it_todo}LU",
logK=f"{logK:.10f}f",
iteration_table=iteration_table_str,
log_factorial=log_factorial_str,
ATOMIC_UINT=f"unsigned {atomic_int}",
ASSET_ENABLE_DOUBLE_SUPPORT=int(self.precision == "double")
)
program = cl.Program(context, asset_cl).build()
# synchronize
cl.enqueue_barrier(queue)
kernel = program.jsf_uniform_orderstat_3d_kernel
kernel(queue, (grid_size,), (n_threads,),
P_total_gpu.data, log_du_gpu.data, g_times_l=True)
P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end])
return P_total
def pycuda(self, log_du):
try:
# PyCuda should not be in requirements-extra because CPU limited
# users won't be able to install Elephant.
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
from pycuda.compiler import SourceModule
except ImportError as err:
raise ImportError(
"Install pycuda with 'pip install pycuda'") from err
self._check_input(log_du)
it_todo = self.num_iterations
u_length = log_du.shape[0]
device = pycuda.autoinit.device
max_l_block = device.MAX_SHARED_MEMORY_PER_BLOCK // (
self.dtype.itemsize * (self.d + 2))
n_threads = min(self.cuda_threads, max_l_block,
device.MAX_THREADS_PER_BLOCK)
if n_threads > device.WARP_SIZE:
# It's more efficient to make the number of threads
# a multiple of the warp size (32).
n_threads -= n_threads % device.WARP_SIZE
log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))]
log_factorial = log_factorial.astype(self.dtype)
logK = log_factorial[-1]
free, total = drv.mem_get_info()
# 4 * (D + 1) * size + 8 * size == mem_avail
chunk_size = free // (4 * log_du.shape[1] + self.dtype.itemsize)
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=u_length)
P_total = np.empty(u_length, dtype=self.dtype)
P_total_gpu = gpuarray.GPUArray(chunk_size, dtype=self.dtype)
log_du_gpu = drv.mem_alloc(4 * chunk_size * log_du.shape[1])
for i_start, i_end in split_idx:
drv.memcpy_htod_async(dest=log_du_gpu, src=log_du[i_start: i_end])
P_total_gpu.fill(0)
chunk_size = i_end - i_start
l_block = min(n_threads, chunk_size)
l_num_blocks = math.ceil(chunk_size / l_block)
grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops))
grid_size = min(grid_size, device.MAX_GRID_DIM_X)
if grid_size > l_num_blocks:
# make grid_size divisible by l_num_blocks
grid_size -= grid_size % l_num_blocks
else:
# grid_size must be at least l_num_blocks
grid_size = l_num_blocks
if self.verbose:
print(f"[Joint prob. matrix] it_todo={it_todo}, "
f"grid_size={grid_size}, L_BLOCK={l_block}, "
f"N_THREADS={n_threads}")
asset_cu = self._compile_template(
template_name="joint_pmat.cu",
L=f"{chunk_size}LLU",
L_BLOCK=l_block,
L_NUM_BLOCKS=l_num_blocks,
ITERATIONS_TODO=f"{it_todo}LLU",
logK=f"{logK:.10f}f",
)
module = SourceModule(asset_cu)
iteration_table_gpu, _ = module.get_global("iteration_table")
iteration_table = self.map_iterations.astype(np.uint64)
drv.memcpy_htod(iteration_table_gpu, iteration_table)
log_factorial_gpu, _ = module.get_global("log_factorial")
drv.memcpy_htod(log_factorial_gpu, log_factorial)
drv.Context.synchronize()
kernel = module.get_function("jsf_uniform_orderstat_3d_kernel")
kernel(P_total_gpu.gpudata, log_du_gpu, grid=(grid_size, 1),
block=(n_threads, 1, 1))
P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end])
return P_total
def _cuda(self, log_du):
# Compile a self-contained joint_pmat_old.cu file and run it
# in a terminal. Having this function is useful to debug ASSET CUDA
# application because it's self-contained and the logic is documented.
# Don't use this backend when the 'log_du' arrays are huge because
# of the disk I/O operations.
# A note to developers: remove this backend in half a year once the
# pycuda backend proves to be stable.
self._check_input(log_du)
asset_cu = self._compile_template(
template_name="joint_pmat_old.cu",
L=f"{log_du.shape[0]}LLU",
N_THREADS=self.cuda_threads,
ITERATIONS_TODO=f"{self.num_iterations}LLU",
ASSET_DEBUG=int(self.verbose)
)
with tempfile.TemporaryDirectory() as asset_tmp_folder:
asset_cu_path = os.path.join(asset_tmp_folder, 'asset.cu')
asset_bin_path = os.path.join(asset_tmp_folder, 'asset.o')
with open(asset_cu_path, 'w') as f:
f.write(asset_cu)
# -O3 optimization flag is for the host code only;
# by default, GPU device code is optimized with -O3.
# -w to ignore warnings.
compile_cmd = ['nvcc', '-w', '-O3', '-o', asset_bin_path,
asset_cu_path]
if self.precision == 'double' and get_cuda_capability_major() >= 6:
# atomicAdd(double) requires compute capability 6.x
compile_cmd.extend(['-arch', 'sm_60'])
compile_status = subprocess.run(
compile_cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.verbose:
print(compile_status.stdout.decode())
print(compile_status.stderr.decode(), file=sys.stderr)
compile_status.check_returncode()
log_du_path = os.path.join(asset_tmp_folder, "log_du.dat")
P_total_path = os.path.join(asset_tmp_folder, "P_total.dat")
with open(log_du_path, 'wb') as f:
log_du.tofile(f)
run_status = subprocess.run(
[asset_bin_path, log_du_path, P_total_path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.verbose:
print(run_status.stdout.decode())
print(run_status.stderr.decode(), file=sys.stderr)
run_status.check_returncode()
with open(P_total_path, 'rb') as f:
P_total = np.fromfile(f, dtype=self.dtype)
return P_total
def _check_input(self, log_du):
it_todo = self.num_iterations
if it_todo > np.iinfo(np.uint64).max:
raise ValueError(f"it_todo ({it_todo}) is larger than MAX_UINT64."
" Only Python backend is supported.")
# Don't convert log_du to float32 transparently for the user to avoid
# situations when the user accidentally passes an array with float64.
# Doing so wastes memory for nothing.
if log_du.dtype != np.float32:
raise ValueError("'log_du' must be a float32 array")
if log_du.shape[1] != self.d + 1:
raise ValueError(f"log_du.shape[1] ({log_du.shape[1]}) must be "
f"equal to D+1 ({self.d + 1})")
def compute(self, u):
if u.shape[1] != self.d:
raise ValueError("Invalid input data shape axis 1: expected {}, "
"got {}".format(self.d, u.shape[1]))
# A faster and memory efficient implementation of
# du = np.diff(u, prepend=0, append=1, axis=1).astype(np.float32)
du = np.empty((u.shape[0], u.shape[1] + 1), dtype=np.float32)
du[:, 0] = u[:, 0]
np.subtract(u[:, 1:], u[:, :-1], out=du[:, 1:-1])
np.subtract(1, u[:, -1], out=du[:, -1])
# precompute logarithms
# ignore warnings about infinities, see inside the loop:
# we replace 0 * ln(0) by 1 to get exp(0 * ln(0)) = 0 ** 0 = 1
# the remaining infinities correctly evaluate to
# exp(ln(0)) = exp(-inf) = 0
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
log_du = np.log(du, out=du)
jsf_backend = self._choose_backend()
P_total = jsf_backend(log_du)
# Captures non-finite values like NaN, inf
inside = (P_total > -self.tolerance) & (P_total < 1 + self.tolerance)
outside_vals = P_total[~inside]
if len(outside_vals) > 0:
# A watchdog for unexpected results.
warnings.warn(f"{len(outside_vals)}/{P_total.shape[0]} values of "
"the computed joint prob. matrix lie outside of the "
f"valid [0, 1] interval:\n{outside_vals}\nIf you're "
"using PyOpenCL backend, make sure you've disabled "
"GPU Hangcheck as described here https://"
"software.intel.com/content/www/us/en/develop/"
"documentation/get-started-with-intel-oneapi-"
"base-linux/top/before-you-begin.html\n"
"Clipping the output array to 0 and 1.")
P_total = np.clip(P_total, a_min=0., a_max=1., out=P_total)
return P_total
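# Hedged example (illustration only, not part of the original module): for
# small n and d the iteration order of the joint survival function can be
# inspected directly. With n=4 and d=2 the generator yields the non-increasing
# index sequences (2, 1), (2, 2), (3, 1), ..., (4, 4), and their count matches
# the num_iterations property. The values of n and d are assumptions.
def _demo_jsf_iteration_order():
    jsf = _JSFUniformOrderStat3D(n=4, d=2)
    return list(jsf._combinations_with_replacement()), jsf.num_iterations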
class _PMatNeighbors(_GPUBackend):
"""
Parameters
----------
filter_shape : tuple of int
A pair of integers representing the kernel shape `(l, w)`.
n_largest : int
The number of largest neighbors to collect for each entry in `mat`.
"""
def __init__(self, filter_shape, n_largest, max_chunk_size=None):
super().__init__(max_chunk_size=max_chunk_size)
self.n_largest = n_largest
self.max_chunk_size = max_chunk_size
filter_size, filter_width = filter_shape
if filter_width >= filter_size:
raise ValueError('filter_shape width must be lower than length')
if not ((filter_width % 2) and (filter_size % 2)):
warnings.warn(
                'The kernel is not centered on the datapoint in whose '
                'calculation it is used. Consider using odd values '
                'for both entries of filter_shape.')
# Construct the kernel
filt = np.ones((filter_size, filter_size), dtype=bool)
filt = np.triu(filt, -filter_width)
filt = np.tril(filt, filter_width)
if n_largest > len(filt.nonzero()[0]):
raise ValueError(f"Too small filter shape {filter_shape} to "
f"select {n_largest} largest elements.")
self.filter_kernel = filt
def _check_input(self, mat):
symmetric = np.all(np.diagonal(mat) == 0.5)
# Check consistent arguments
filter_size = self.filter_kernel.shape[0]
if (symmetric and mat.shape[0] < 2 * filter_size - 1) \
or (not symmetric and min(mat.shape) < filter_size):
raise ValueError(f"'filter_shape' {self.filter_kernel.shape} is "
f"too large for the input matrix of shape "
f"{mat.shape}")
if mat.dtype != np.float32:
raise ValueError("The input matrix dtype must be float32.")
def pyopencl(self, mat):
import pyopencl as cl
import pyopencl.array as cl_array
from jinja2 import Template
context = cl.create_some_context(interactive=False)
device = context.devices[0]
queue = cl.CommandQueue(context)
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
filt_size = self.filter_kernel.shape[0] # filt is a square matrix
filt_rows, filt_cols = self.filter_kernel.nonzero()
filt_rows = "{%s}" % ", ".join(f"{row}U" for row in filt_rows)
filt_cols = "{%s}" % ", ".join(f"{col}U" for col in filt_cols)
lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
if symmetric:
mat = mat[filt_size:]
lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1]
else:
lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1]
# GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default
mem_avail = min(device.max_mem_alloc_size, device.global_mem_size,
1 << 31)
# 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols
chunk_size = (mem_avail // 4 - filt_size * lmat.shape[1]) // (
lmat.shape[1] * (self.n_largest + 1))
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=lmat.shape[0],
min_chunk_size=filt_size)
pmat_cl_path = Path(__file__).parent / "pmat_neighbors.cl"
pmat_cl_template = Template(pmat_cl_path.read_text())
lmat_gpu = cl_array.Array(
queue, shape=(chunk_size, lmat.shape[1], self.n_largest),
dtype=np.float32
)
for i_start, i_end in split_idx:
mat_gpu = cl_array.to_device(queue,
mat[i_start: i_end + filt_size],
async_=True)
lmat_gpu.fill(0, queue=queue)
chunk_size = i_end - i_start
it_todo = chunk_size * (lmat.shape[1] - filt_size + 1)
pmat_neighbors_cl = pmat_cl_template.render(
FILT_SIZE=filt_size,
N_LARGEST=self.n_largest,
PMAT_COLS=f"{lmat.shape[1]}LU",
Y_OFFSET=f"{i_start}LU",
NONZERO_SIZE=self.filter_kernel.sum(),
SYMMETRIC=int(symmetric),
filt_rows=filt_rows,
filt_cols=filt_cols
)
program = cl.Program(context, pmat_neighbors_cl).build()
# synchronize
cl.enqueue_barrier(queue)
kernel = program.pmat_neighbors
# When the grid size is set to the total number of work items to
# execute and the local size is set to None, PyOpenCL chooses the
# number of threads automatically such that the total number of
# work items exactly matches the desired number of iterations.
kernel(queue, (it_todo,), None, lmat_gpu.data, mat_gpu.data)
lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end])
return lmat_padded
def pycuda(self, mat):
from jinja2 import Template
try:
# PyCuda should not be in requirements-extra because CPU limited
# users won't be able to install Elephant.
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
from pycuda.compiler import SourceModule
except ImportError as err:
raise ImportError(
"Install pycuda with 'pip install pycuda'") from err
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
device = pycuda.autoinit.device
n_threads = device.MAX_THREADS_PER_BLOCK
filt_size = self.filter_kernel.shape[0]
filt_rows, filt_cols = self.filter_kernel.nonzero()
lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
if symmetric:
mat = mat[filt_size:]
lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1]
else:
lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1]
free, total = drv.mem_get_info()
# 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols
chunk_size = (free // 4 - filt_size * lmat.shape[1]) // (
lmat.shape[1] * (self.n_largest + 1))
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=lmat.shape[0],
min_chunk_size=filt_size)
pmat_cu_path = Path(__file__).parent / "pmat_neighbors.cu"
pmat_cu_template = Template(pmat_cu_path.read_text())
lmat_gpu = gpuarray.GPUArray(
(chunk_size, lmat.shape[1], self.n_largest), dtype=np.float32)
mat_gpu = drv.mem_alloc(4 * (chunk_size + filt_size) * mat.shape[1])
for i_start, i_end in split_idx:
drv.memcpy_htod_async(dest=mat_gpu,
src=mat[i_start: i_end + filt_size])
lmat_gpu.fill(0)
chunk_size = i_end - i_start
it_todo = chunk_size * (lmat.shape[1] - filt_size + 1)
pmat_neighbors_cu = pmat_cu_template.render(
FILT_SIZE=filt_size,
N_LARGEST=self.n_largest,
PMAT_COLS=f"{lmat.shape[1]}LLU",
Y_OFFSET=f"{i_start}LLU",
NONZERO_SIZE=self.filter_kernel.sum(),
SYMMETRIC=int(symmetric),
IT_TODO=it_todo,
)
module = SourceModule(pmat_neighbors_cu)
filt_rows_gpu, _ = module.get_global("filt_rows")
drv.memcpy_htod(filt_rows_gpu, filt_rows.astype(np.uint32))
filt_cols_gpu, _ = module.get_global("filt_cols")
drv.memcpy_htod(filt_cols_gpu, filt_cols.astype(np.uint32))
drv.Context.synchronize()
grid_size = math.ceil(it_todo / n_threads)
if grid_size > device.MAX_GRID_DIM_X:
raise ValueError("Cannot launch a CUDA kernel with "
f"{grid_size} num. of blocks. Adjust the "
"'max_chunk_size' parameter.")
kernel = module.get_function("pmat_neighbors")
kernel(lmat_gpu.gpudata, mat_gpu, grid=(grid_size, 1),
block=(n_threads, 1, 1))
lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end])
return lmat_padded
def compute(self, mat):
"""
Build the 3D matrix `L` of largest neighbors of elements in a 2D matrix
`mat`.
For each entry `mat[i, j]`, collects the `n_largest` elements with
largest values around `mat[i, j]`, say `z_i, i=1,2,...,n_largest`,
and assigns them to `L[i, j, :]`.
The zone around `mat[i, j]` where largest neighbors are collected from
is a rectangular area (kernel) of shape `(l, w) = filter_shape`
centered around `mat[i, j]` and aligned along the diagonal.
If `mat` is symmetric, only the triangle below the diagonal is
considered.
Parameters
----------
mat : np.ndarray
A square matrix of real-valued elements.
Returns
-------
lmat : np.ndarray
A matrix of shape `(l, w, n_largest)` containing along the last
dimension `lmat[i, j, :]` the largest neighbors of `mat[i, j]`.
"""
backend = self._choose_backend()
lmat = backend(mat)
return lmat
def cpu(self, mat):
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
filter_size = self.filter_kernel.shape[0]
# Initialize the matrix of d-largest values as a matrix of zeroes
lmat = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
N_bin_y = mat.shape[0]
N_bin_x = mat.shape[1]
# if the matrix is symmetric do not use kernel positions intersected
# by the diagonal
if symmetric:
bin_range_y = range(filter_size, N_bin_y - filter_size + 1)
else:
bin_range_y = range(N_bin_y - filter_size + 1)
bin_range_x = range(N_bin_x - filter_size + 1)
# compute matrix of largest values
for y in bin_range_y:
if symmetric:
# x range depends on y position
bin_range_x = range(y - filter_size + 1)
for x in bin_range_x:
patch = mat[y: y + filter_size, x: x + filter_size]
mskd = patch[self.filter_kernel]
largest_vals = np.sort(mskd)[-self.n_largest:]
lmat[y + (filter_size // 2), x + (filter_size // 2), :] = \
largest_vals
return lmat
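# Hedged example (illustration only, not part of the original module):
# collect, for every entry of a small random float32 matrix, the 3 largest
# values inside a (3, 1) kernel aligned along the diagonal, using the CPU
# backend directly. The matrix size and kernel shape are assumptions.
def _demo_pmat_neighbors_cpu():
    rng = np.random.default_rng(0)
    mat = rng.random((6, 6)).astype(np.float32)
    lmat = _PMatNeighbors(filter_shape=(3, 1), n_largest=3).cpu(mat)
    return lmat  # shape (6, 6, 3)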
def synchronous_events_intersection(sse1, sse2, intersection='linkwise'):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of positions `(iK, jK)` of matrix entries and
associated synchronous events `SK`, finds the intersection among them.
The intersection can be performed 'pixelwise' or 'linkwise'.
* if 'pixelwise', it yields a new SSE which retains only events in
`sse1` whose pixel position matches a pixel position in `sse2`. This
operation is not symmetric:
`intersection(sse1, sse2) != intersection(sse2, sse1)`.
* if 'linkwise', an additional step is performed where each retained
synchronous event `SK` in `sse1` is intersected with the
corresponding event in `sse2`. This yields a symmetric operation:
`intersection(sse1, sse2) = intersection(sse2, sse1)`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Each is a dictionary of pixel positions `(i, j)` as keys and sets `S`
of synchronous events as values (see above).
intersection : {'pixelwise', 'linkwise'}, optional
The type of intersection to perform among the two SSEs (see above).
Default: 'linkwise'
Returns
-------
sse_new : dict
A new SSE (same structure as `sse1` and `sse2`) which retains only the
events of `sse1` associated to keys present both in `sse1` and `sse2`.
If `intersection = 'linkwise'`, such events are additionally
intersected with the associated events in `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
sse_new = sse1.copy()
for pixel1 in sse1.keys():
if pixel1 not in sse2.keys():
del sse_new[pixel1]
if intersection == 'linkwise':
        for pixel1, link1 in list(sse_new.items()):
sse_new[pixel1] = link1.intersection(sse2[pixel1])
if len(sse_new[pixel1]) == 0:
del sse_new[pixel1]
elif intersection == 'pixelwise':
pass
else:
raise ValueError(
"intersection (=%s) can only be" % intersection +
" 'pixelwise' or 'linkwise'")
return sse_new
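# Hedged example (illustration only, not part of the original module): the
# two toy SSE dictionaries below share only the pixel (1, 2); with 'linkwise'
# intersection the associated neuron sets are intersected as well, leaving
# {(1, 2): {1, 2}}. All pixel positions and neuron IDs are assumptions.
def _demo_synchronous_events_intersection():
    sse1 = {(1, 2): {1, 2, 3}, (3, 4): {5, 6}}
    sse2 = {(1, 2): {1, 2}, (5, 6): {0}}
    return synchronous_events_intersection(sse1, sse2,
                                           intersection='linkwise')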
def synchronous_events_difference(sse1, sse2, difference='linkwise'):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), computes the difference between `sse1` and `sse2`.
The difference can be performed 'pixelwise' or 'linkwise':
* if 'pixelwise', it yields a new SSE which contains all (and only) the
events in `sse1` whose pixel position doesn't match any pixel in
`sse2`.
* if 'linkwise', for each pixel `(i, j)` in `sse1` and corresponding
synchronous event `S1`, if `(i, j)` is a pixel in `sse2`
corresponding to the event `S2`, it retains the set difference
`S1 - S2`. If `(i, j)` is not a pixel in `sse2`, it retains the full
set `S1`.
Note that in either case the difference is a non-symmetric operation:
`intersection(sse1, sse2) != intersection(sse2, sse1)`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values (see above).
difference : {'pixelwise', 'linkwise'}, optional
The type of difference to perform between `sse1` and `sse2` (see
above).
Default: 'linkwise'
Returns
-------
sse_new : dict
A new SSE (same structure as `sse1` and `sse2`) which retains the
difference between `sse1` and `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
sse_new = sse1.copy()
for pixel1 in sse1.keys():
if pixel1 in sse2.keys():
if difference == 'pixelwise':
del sse_new[pixel1]
elif difference == 'linkwise':
sse_new[pixel1] = sse_new[pixel1].difference(sse2[pixel1])
if len(sse_new[pixel1]) == 0:
del sse_new[pixel1]
else:
raise ValueError(
"difference (=%s) can only be" % difference +
" 'pixelwise' or 'linkwise'")
return sse_new
def _remove_empty_events(sse):
"""
Given a sequence of synchronous events (SSE) `sse` consisting of a pool of
pixel positions and associated synchronous events (see below), returns a
copy of `sse` where all empty events have been removed.
`sse` must be provided as a dictionary of type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse : dict
A dictionary of pixel positions `(i, j)` as keys, and sets `S` of
synchronous events as values (see above).
Returns
-------
sse_new : dict
A copy of `sse` where all empty events have been removed.
"""
sse_new = sse.copy()
for pixel, link in sse.items():
if link == set([]):
del sse_new[pixel]
return sse_new
def synchronous_events_identical(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
    (see below), determines whether `sse1` is identical to `sse2`.
    Two SSEs are identical if they contain the same pixels and the events
    associated to each common pixel coincide; empty events are ignored in
    the comparison.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is identical to `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
# Return whether sse11 == sse22
return sse11 == sse22
def synchronous_events_no_overlap(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` and `sse2` are disjoint.
Two SSEs are disjoint if they don't share pixels, or if the events
associated to common pixels are disjoint.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is disjoint from `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
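    Examples
    --------
    A toy sketch with hypothetical SSEs (assuming the function is available
    in the current namespace):

    >>> sse1 = {(1, 2): {1, 2}}
    >>> sse2 = {(3, 4): {3}}
    >>> synchronous_events_no_overlap(sse1, sse2)
    True
    >>> sse3 = {(1, 2): {2, 3}}
    >>> synchronous_events_no_overlap(sse1, sse3)
    False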
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
# If both SSEs are empty, return False (we consider them equal)
if sse11 == {} and sse22 == {}:
return False
common_pixels = set(sse11.keys()).intersection(set(sse22.keys()))
if len(common_pixels) == 0:
return True
if all(sse11[p].isdisjoint(sse22[p]) for p in common_pixels):
return True
return False
def synchronous_events_contained_in(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` is strictly contained in `sse2`.
`sse1` is strictly contained in `sse2` if all its pixels are pixels of
`sse2`, if its associated events are subsets of the corresponding events
in `sse2`, and if `sse2` contains non-empty events, or neuron IDs in some
event, which do not belong to `sse1` (i.e., `sse1` and `sse2` are not
identical).
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is a subset of `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
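    Examples
    --------
    A toy sketch with hypothetical SSEs (assuming the function is available
    in the current namespace); note that containment is strict, so an SSE is
    not contained in itself:

    >>> sse1 = {(1, 2): {1, 2}}
    >>> sse2 = {(1, 2): {1, 2, 3}, (3, 4): {5}}
    >>> synchronous_events_contained_in(sse1, sse2)
    True
    >>> synchronous_events_contained_in(sse2, sse1)
    False
    >>> synchronous_events_contained_in(sse1, sse1)
    False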
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
    # Return False if sse11 and sse22 are identical
if synchronous_events_identical(sse11, sse22):
return False
# Return False if any pixel in sse1 is not contained in sse2, or if any
# link of sse1 is not a subset of the corresponding link in sse2.
# Otherwise (if sse1 is a subset of sse2) continue
for pixel1, link1 in sse11.items():
if pixel1 not in sse22.keys():
return False
if not link1.issubset(sse22[pixel1]):
return False
# Check that sse1 is a STRICT subset of sse2, i.e. that sse2 contains at
# least one pixel or neuron id not present in sse1.
return not synchronous_events_identical(sse11, sse22)
def synchronous_events_contains_all(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` strictly contains `sse2`.
`sse1` strictly contains `sse2` if it contains all pixels of `sse2`, if all
associated events in `sse1` contain those in `sse2`, and if `sse1`
additionally contains other pixels / events not contained in `sse2`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` strictly contains `sse2`.
Notes
-----
`synchronous_events_contains_all(sse1, sse2)` is identical to
`synchronous_events_is_subsequence(sse2, sse1)`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
return synchronous_events_contained_in(sse2, sse1)
def synchronous_events_overlap(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether the two SSEs overlap.
The SSEs overlap if they are not equal and none of them is a superset of
the other one but they are also not disjoint.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` and `sse2` overlap.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
contained_in = synchronous_events_contained_in(sse1, sse2)
contains_all = synchronous_events_contains_all(sse1, sse2)
identical = synchronous_events_identical(sse1, sse2)
is_disjoint = synchronous_events_no_overlap(sse1, sse2)
return not (contained_in or contains_all or identical or is_disjoint)
def _signals_t_start_stop(signals, t_start=None, t_stop=None):
if t_start is None:
t_start = _signals_same_attribute(signals, 't_start')
if t_stop is None:
t_stop = _signals_same_attribute(signals, 't_stop')
return t_start, t_stop
def _intersection_matrix(spiketrains, spiketrains_y, bin_size, t_start_x,
t_start_y, t_stop_x, t_stop_y, normalization=None):
if spiketrains_y is None:
spiketrains_y = spiketrains
# Compute the binned spike train matrices, along both time axes
spiketrains_binned = conv.BinnedSpikeTrain(
spiketrains, bin_size=bin_size,
t_start=t_start_x, t_stop=t_stop_x)
spiketrains_binned_y = conv.BinnedSpikeTrain(
spiketrains_y, bin_size=bin_size,
t_start=t_start_y, t_stop=t_stop_y)
# Compute imat by matrix multiplication
bsts_x = spiketrains_binned.sparse_matrix
bsts_y = spiketrains_binned_y.sparse_matrix
# Compute the number of spikes in each bin, for both time axes
# 'A1' property returns self as a flattened ndarray.
spikes_per_bin_x = bsts_x.sum(axis=0).A1
spikes_per_bin_y = bsts_y.sum(axis=0).A1
# Compute the intersection matrix imat
imat = bsts_x.T.dot(bsts_y).toarray().astype(np.float32)
for ii in range(bsts_x.shape[1]):
# Normalize the row
col_sum = bsts_x[:, ii].sum()
if normalization is None or col_sum == 0:
norm_coef = 1.
elif normalization == 'intersection':
norm_coef = np.minimum(
spikes_per_bin_x[ii], spikes_per_bin_y)
elif normalization == 'mean':
# geometric mean
norm_coef = np.sqrt(
spikes_per_bin_x[ii] * spikes_per_bin_y)
elif normalization == 'union':
norm_coef = np.array([(bsts_x[:, ii]
+ bsts_y[:, jj]).count_nonzero()
for jj in range(bsts_y.shape[1])])
else:
raise ValueError(
"Invalid parameter 'norm': {}".format(normalization))
# If normalization required, for each j such that bsts_y[j] is
# identically 0 the code above sets imat[:, j] to identically nan.
# Substitute 0s instead.
imat[ii, :] = np.divide(imat[ii, :], norm_coef,
out=np.zeros(imat.shape[1],
dtype=np.float32),
where=norm_coef != 0)
# Return the intersection matrix and the edges of the bins used for the
# x and y axes, respectively.
return imat
class ASSET(object):
"""
Analysis of Sequences of Synchronous EvenTs class.
Parameters
----------
spiketrains_i, spiketrains_j : list of neo.SpikeTrain
Input spike trains for the first and second time dimensions,
respectively, to compute the p-values from.
        If `spiketrains_j` is None, it's set to `spiketrains_i`.
    bin_size : pq.Quantity, optional
        The width of the time bins used to compute the probability matrix.
        Default: 3 ms
t_start_i, t_start_j : pq.Quantity, optional
The start time of the binning for the first and second axes,
respectively.
If None, the attribute `t_start` of the spike trains is used
(if the same for all spike trains).
Default: None
t_stop_i, t_stop_j : pq.Quantity, optional
The stop time of the binning for the first and second axes,
respectively.
If None, the attribute `t_stop` of the spike trains is used
(if the same for all spike trains).
Default: None
verbose : bool, optional
If True, print messages and show progress bar.
Default: True
Raises
------
ValueError
        If the `t_start` and `t_stop` times of the two spike train lists are
        neither perfectly aligned nor fully disjoint.
"""
def __init__(self, spiketrains_i, spiketrains_j=None, bin_size=3 * pq.ms,
t_start_i=None, t_start_j=None, t_stop_i=None, t_stop_j=None,
verbose=True):
self.spiketrains_i = spiketrains_i
if spiketrains_j is None:
spiketrains_j = spiketrains_i
self.spiketrains_j = spiketrains_j
self.bin_size = bin_size
self.t_start_i, self.t_stop_i = _signals_t_start_stop(
spiketrains_i,
t_start=t_start_i,
t_stop=t_stop_i)
self.t_start_j, self.t_stop_j = _signals_t_start_stop(
spiketrains_j,
t_start=t_start_j,
t_stop=t_stop_j)
self.verbose = verbose and rank == 0
msg = 'The time intervals for x and y need to be either identical ' \
'or fully disjoint, but they are:\n' \
'x: ({}, {}) and y: ({}, {}).'.format(self.t_start_i,
self.t_stop_i,
self.t_start_j,
self.t_stop_j)
# the starts have to be perfectly aligned for the binning to work
# the stops can differ without impacting the binning
if self.t_start_i == self.t_start_j:
if not _quantities_almost_equal(self.t_stop_i, self.t_stop_j):
raise ValueError(msg)
elif (self.t_start_i < self.t_start_j < self.t_stop_i) \
or (self.t_start_i < self.t_stop_j < self.t_stop_i):
raise ValueError(msg)
# Compute the binned spike train matrices, along both time axes
self.spiketrains_binned_i = conv.BinnedSpikeTrain(
self.spiketrains_i, bin_size=self.bin_size,
t_start=self.t_start_i, t_stop=self.t_stop_i)
self.spiketrains_binned_j = conv.BinnedSpikeTrain(
self.spiketrains_j, bin_size=self.bin_size,
t_start=self.t_start_j, t_stop=self.t_stop_j)
@property
def x_edges(self):
"""
A Quantity array of `n+1` edges of the bins used for the horizontal
axis of the intersection matrix, where `n` is the number of bins that
time was discretized in.
"""
return self.spiketrains_binned_i.bin_edges.rescale(self.bin_size.units)
@property
def y_edges(self):
"""
A Quantity array of `n+1` edges of the bins used for the vertical axis
of the intersection matrix, where `n` is the number of bins that
time was discretized in.
"""
return self.spiketrains_binned_j.bin_edges.rescale(self.bin_size.units)
def is_symmetric(self):
"""
Returns
-------
bool
Whether the intersection matrix is symmetric or not.
See Also
--------
ASSET.intersection_matrix
"""
return _quantities_almost_equal(self.x_edges[0], self.y_edges[0])
def intersection_matrix(self, normalization=None):
"""
Generates the intersection matrix from a list of spike trains.
Given a list of `neo.SpikeTrain`, consider two binned versions of them
differing for the starting and ending times of the binning:
`t_start_x`, `t_stop_x`, `t_start_y` and `t_stop_y` respectively (the
time intervals can be either identical or completely disjoint). Then
calculate the intersection matrix `M` of the two binned data, where
`M[i,j]` is the overlap of bin `i` in the first binned data and bin `j`
in the second binned data (i.e., the number of spike trains spiking at
both bin `i` and bin `j`).
The matrix entries can be normalized to values between `0` and `1` via
different normalizations (see "Parameters" section).
Parameters
----------
normalization : {'intersection', 'mean', 'union'} or None, optional
The normalization type to be applied to each entry `M[i,j]` of the
intersection matrix `M`. Given the sets `s_i` and `s_j` of neuron
IDs in the bins `i` and `j` respectively, the normalization
coefficient can be:
            * None: no normalization (raw co-firing counts)
            * 'intersection': `len(intersection(s_i, s_j))`
            * 'mean': `sqrt(len(s_i) * len(s_j))`
* 'union': `len(union(s_i, s_j))`
Default: None
Returns
-------
imat : (n,n) np.ndarray
The floating point intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
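        Examples
        --------
        A minimal sketch on toy data (the import path is assumed and may
        differ across Elephant versions; two 9 ms spike trains binned at
        3 ms yield a 3x3 matrix):

        >>> import neo
        >>> import quantities as pq
        >>> from elephant.asset import ASSET
        >>> spiketrains = [
        ...     neo.SpikeTrain([1, 4, 7] * pq.ms, t_stop=9 * pq.ms),
        ...     neo.SpikeTrain([1, 4, 8] * pq.ms, t_stop=9 * pq.ms)]
        >>> asset_obj = ASSET(spiketrains, bin_size=3 * pq.ms)
        >>> asset_obj.intersection_matrix().shape
        (3, 3)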
"""
imat = _intersection_matrix(self.spiketrains_i, self.spiketrains_j,
self.bin_size,
self.t_start_i, self.t_start_j,
self.t_stop_i, self.t_stop_j,
normalization=normalization)
return imat
def probability_matrix_montecarlo(self, n_surrogates, imat=None,
surrogate_method='dither_spikes',
surrogate_dt=None):
"""
Given a list of parallel spike trains, estimate the cumulative
probability of each entry in their intersection matrix by a Monte Carlo
approach using surrogate data.
Contrarily to the analytical version (see
:func:`ASSET.probability_matrix_analytical`) the Monte Carlo one does
not incorporate the assumptions of Poissonianity in the null
hypothesis.
The method produces surrogate spike trains (using one of several
methods at disposal, see "Parameters" section) and calculates their
intersection matrix `M`. For each entry `(i, j)`, the intersection CDF
`P[i, j]` is then given by:
.. centered:: P[i, j] = #(spike_train_surrogates such that
M[i, j] < I[i, j]) / #(spike_train_surrogates)
If `P[i, j]` is large (close to 1), `I[i, j]` is statistically
significant: the probability to observe an overlap equal to or larger
than `I[i, j]` under the null hypothesis is `1 - P[i, j]`, very small.
Parameters
----------
n_surrogates : int
The number of spike train surrogates to generate for the bootstrap
procedure.
imat : (n,n) np.ndarray or None, optional
The floating point intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
If None, the output of :func:`ASSET.intersection_matrix` is used.
Default: None
surrogate_method : {'dither_spike_train', 'dither_spikes',
'jitter_spikes',
'randomise_spikes', 'shuffle_isis',
'joint_isi_dithering'}, optional
The method to generate surrogate spike trains. Refer to the
:func:`spike_train_surrogates.surrogates` documentation for more
information about each surrogate method. Note that some of these
methods need `surrogate_dt` parameter, others ignore it.
            Default: 'dither_spikes'
surrogate_dt : pq.Quantity, optional
For surrogate methods shifting spike times randomly around their
original time ('dither_spike_train', 'dither_spikes') or replacing
them randomly within a certain window ('jitter_spikes'),
`surrogate_dt` represents the size of that shift (window). For
other methods, `surrogate_dt` is ignored.
If None, it's set to `self.bin_size * 5`.
Default: None
Returns
-------
pmat : np.ndarray
The cumulative probability matrix. `pmat[i, j]` represents the
estimated probability of having an overlap between bins `i` and `j`
STRICTLY LOWER than the observed overlap, under the null hypothesis
of independence of the input spike trains.
Notes
-----
We recommend playing with `surrogate_dt` parameter to see how it
influences the result matrix. For this, refer to the ASSET tutorial.
See Also
--------
ASSET.probability_matrix_analytical : analytical derivation of the
matrix
"""
if imat is None:
# Compute the intersection matrix of the original data
imat = self.intersection_matrix()
if surrogate_dt is None:
surrogate_dt = self.bin_size * 5
symmetric = self.is_symmetric()
# Generate surrogate spike trains as a list surrs
# Compute the p-value matrix pmat; pmat[i, j] counts the fraction of
# surrogate data whose intersection value at (i, j) is lower than or
# equal to that of the original data
pmat = np.zeros(imat.shape, dtype=np.int32)
for surr_id in trange(n_surrogates, desc="pmat_bootstrap",
disable=not self.verbose):
if mpi_accelerated and surr_id % size != rank:
continue
surrogates = [spike_train_surrogates.surrogates(
st, n_surrogates=1,
method=surrogate_method,
dt=surrogate_dt,
decimals=None,
edges=True)[0]
for st in self.spiketrains_i]
if symmetric:
surrogates_y = surrogates
else:
surrogates_y = [spike_train_surrogates.surrogates(
st, n_surrogates=1, method=surrogate_method,
dt=surrogate_dt, decimals=None, edges=True)[0]
for st in self.spiketrains_j]
imat_surr = _intersection_matrix(surrogates, surrogates_y,
self.bin_size,
self.t_start_i, self.t_start_j,
self.t_stop_i, self.t_stop_j)
pmat += (imat_surr <= (imat - 1))
del imat_surr
if mpi_accelerated:
pmat = comm.allreduce(pmat, op=MPI.SUM)
pmat = pmat * 1. / n_surrogates
if symmetric:
np.fill_diagonal(pmat, 0.5)
return pmat
def probability_matrix_analytical(self, imat=None,
firing_rates_x='estimate',
firing_rates_y='estimate',
kernel_width=100 * pq.ms):
r"""
Given a list of spike trains, approximates the cumulative probability
of each entry in their intersection matrix.
The approximation is analytical and works under the assumptions that
the input spike trains are independent and Poisson. It works as
follows:
* Bin each spike train at the specified `bin_size`: this yields a
binary array of 1s (spike in bin) and 0s (no spike in bin;
clipping used);
* If required, estimate the rate profile of each spike train by
convolving the binned array with a boxcar kernel of user-defined
length;
        * For each neuron `k` and each pair of bins `i` and `j`, compute
          the probability :math:`p_{ijk}` that neuron `k` fired in both
          bins `i` and `j`;
        * Approximate the probability distribution of the intersection
          value at `(i, j)` by a Poisson distribution with mean parameter
          :math:`l = \sum_k p_{ijk}`,
          justified by Le Cam's approximation of a sum of independent
          Bernoulli random variables with a Poisson distribution.
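        The cumulative probability at `(i, j)` is then read off the Poisson
        CDF just below the observed overlap,
        :math:`P[i, j] = F_l(I[i, j] - 1)`, where :math:`F_l` denotes the CDF
        of a Poisson distribution with mean :math:`l` (this corresponds to
        the ``scipy.stats.poisson.cdf(imat - 1, Mu)`` call in the
        implementation below).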
Parameters
----------
imat : (n,n) np.ndarray or None, optional
The intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
If None, the output of :func:`ASSET.intersection_matrix` is used.
Default: None
firing_rates_x, firing_rates_y : list of neo.AnalogSignal or 'estimate'
If a list, `firing_rates[i]` is the firing rate of the spike train
`spiketrains[i]`.
If 'estimate', firing rates are estimated by simple boxcar kernel
convolution, with the specified `kernel_width`.
Default: 'estimate'
kernel_width : pq.Quantity, optional
The total width of the kernel used to estimate the rate profiles
when `firing_rates` is 'estimate'.
Default: 100 * pq.ms
Returns
-------
pmat : np.ndarray
The cumulative probability matrix. `pmat[i, j]` represents the
estimated probability of having an overlap between bins `i` and `j`
STRICTLY LOWER than the observed overlap, under the null hypothesis
of independence of the input spike trains.
"""
if imat is None:
# Compute the intersection matrix of the original data
imat = self.intersection_matrix()
symmetric = self.is_symmetric()
bsts_x_matrix = self.spiketrains_binned_i.to_bool_array()
if symmetric:
bsts_y_matrix = bsts_x_matrix
else:
bsts_y_matrix = self.spiketrains_binned_j.to_bool_array()
# Check that the nr. neurons is identical between the two axes
if bsts_x_matrix.shape[0] != bsts_y_matrix.shape[0]:
raise ValueError(
'Different number of neurons along the x and y axis!')
# Define the firing rate profiles
if firing_rates_x == 'estimate':
# If rates are to be estimated, create the rate profiles as
# Quantity objects obtained by boxcar-kernel convolution
fir_rate_x = self._rate_of_binned_spiketrain(bsts_x_matrix,
kernel_width)
elif isinstance(firing_rates_x, list):
# If rates provided as lists of AnalogSignals, create time slices
# for both axes, interpolate in the time bins of interest and
# convert to Quantity
fir_rate_x = _interpolate_signals(
firing_rates_x, self.spiketrains_binned_i.bin_edges[:-1],
self.verbose)
else:
raise ValueError(
'fir_rates_x must be a list or the string "estimate"')
if symmetric:
fir_rate_y = fir_rate_x
elif firing_rates_y == 'estimate':
fir_rate_y = self._rate_of_binned_spiketrain(bsts_y_matrix,
kernel_width)
elif isinstance(firing_rates_y, list):
# If rates provided as lists of AnalogSignals, create time slices
# for both axes, interpolate in the time bins of interest and
# convert to Quantity
fir_rate_y = _interpolate_signals(
firing_rates_y, self.spiketrains_binned_j.bin_edges[:-1],
self.verbose)
else:
raise ValueError(
'fir_rates_y must be a list or the string "estimate"')
# For each neuron, compute the prob. that that neuron spikes in any bin
if self.verbose:
print('compute the prob. that each neuron fires in each pair of '
'bins...')
rate_bins_x = (fir_rate_x * self.bin_size).simplified.magnitude
spike_probs_x = 1. - np.exp(-rate_bins_x)
if symmetric:
spike_probs_y = spike_probs_x
else:
rate_bins_y = (fir_rate_y * self.bin_size).simplified.magnitude
spike_probs_y = 1. - np.exp(-rate_bins_y)
# Compute the matrix Mu[i, j] of parameters for the Poisson
# distributions which describe, at each (i, j), the approximated
# overlap probability. This matrix is just the sum of the probability
# matrices p_ijk computed for each neuron k:
# p_ijk is the probability that neuron k spikes in both bins i and j.
# The sum of outer products is equivalent to a dot product.
if self.verbose:
print(
"compute the probability matrix by Le Cam's approximation...")
Mu = spike_probs_x.T.dot(spike_probs_y)
# A straightforward implementation is:
# pmat_shape = spike_probs_x.shape[1], spike_probs_y.shape[1]
# Mu = np.zeros(pmat_shape, dtype=np.float64)
# for probx, proby in zip(spike_probs_x, spike_probs_y):
# Mu += np.outer(probx, proby)
# Compute the probability matrix obtained from imat using the Poisson
# pdfs
pmat = scipy.stats.poisson.cdf(imat - 1, Mu)
if symmetric:
# Substitute 0.5 to the elements along the main diagonal
if self.verbose:
print("substitute 0.5 to elements along the main diagonal...")
np.fill_diagonal(pmat, 0.5)
return pmat
def joint_probability_matrix(self, pmat, filter_shape, n_largest,
min_p_value=1e-5, precision='float',
cuda_threads=64, cuda_cwr_loops=32,
tolerance=1e-5):
"""
Map a probability matrix `pmat` to a joint probability matrix `jmat`,
where `jmat[i, j]` is the joint p-value of the largest neighbors of
`pmat[i, j]`.
The values of `pmat` are assumed to be uniformly distributed in the
        range [0, 1]. A rectangular kernel of shape `filter_shape=(l, w)` is
        centered on each entry `pmat[i, j]` and aligned along the diagonal on
        which `pmat[i, j]` lies; the `n_largest` values falling within the
        kernel are extracted and their joint p-value is stored in
        `jmat[i, j]`.
Parameters
----------
pmat : np.ndarray
A square matrix, the output of
:func:`ASSET.probability_matrix_montecarlo` or
:func:`ASSET.probability_matrix_analytical`, of cumulative
probability values between 0 and 1. The values are assumed
to be uniformly distributed in the said range.
filter_shape : tuple of int
A pair of integers representing the kernel shape `(l, w)`.
n_largest : int
The number of the largest neighbors to collect for each entry in
`jmat`.
min_p_value : float, optional
The minimum p-value in range `[0, 1)` for individual entries in
`pmat`. Each `pmat[i, j]` is set to
`min(pmat[i, j], 1-p_value_min)` to avoid that a single highly
significant value in `pmat` (extreme case: `pmat[i, j] = 1`) yields
joint significance of itself and its neighbors.
Default: 1e-5
precision : {'float', 'double'}, optional
Single or double floating-point precision for the resulting `jmat`
matrix.
* `'float'`: 32 bits; the tolerance error is ``≲1e-3``.
* `'double'`: 64 bits; the tolerance error is ``<1e-5``.
            Double floating-point precision is typically about 4 times slower
            than the single-precision equivalent.
Default: 'float'
cuda_threads : int, optional
[CUDA/OpenCL performance parameter that does not influence the
result.]
            The number of CUDA/OpenCL threads per block (along the X axis),
            between 1 and 1024; it is used only if the CUDA or OpenCL backend
            is enabled. For performance reasons, it should be a multiple of 32.
Old GPUs (Tesla K80) perform faster with `cuda_threads` larger
than 64 while new series (Tesla T4) with capabilities 6.x and more
work best with 32 threads.
Default: 64
cuda_cwr_loops : int, optional
[CUDA/OpenCL performance parameter that does not influence the
result.]
A positive integer that defines the number of fast
'combinations_with_replacement' loops to run to reduce branch
divergence. This parameter influences the performance when the
number of iterations is huge (`>1e8`); in such cases, increase
the value.
Default: 32
tolerance : float, optional
Tolerance is used to catch unexpected behavior of billions of
floating point additions, when the number of iterations is huge
or the data arrays are large. A warning is thrown when the
resulting joint prob. matrix values are outside of the acceptable
range ``[-tolerance, 1.0 + tolerance]``.
Default: 1e-5
Returns
-------
jmat : np.ndarray
The joint probability matrix associated to `pmat`.
Notes
-----
        1. By default, if CUDA is detected, CUDA acceleration is used. The
           CUDA backend is **~X1000** faster than the Python implementation.
           To turn off CUDA features, set the environment flag
           ``ELEPHANT_USE_CUDA`` to ``0``.
        2. Otherwise, if PyOpenCL is installed and detected, the PyOpenCL
           backend is used. It is **~X100** faster than the Python
           implementation.
To turn off OpenCL features, set the environment flag
``ELEPHANT_USE_OPENCL`` to ``0``.
When using PyOpenCL backend, make sure you've disabled GPU Hangcheck
as described in the `Intel GPU developers documentation
<https://software.intel.com/content/www/us/en/develop/
documentation/get-started-with-intel-oneapi-base-linux/top/
before-you-begin.html>`_. Do it with caution - using your built-in
Intel graphics card to perform computations may make the system
unresponsive until the compute program terminates.
"""
l, w = filter_shape
        # For each entry pmat[i, j], collect its n_largest neighbors within
        # the kernel and cap them at 1 - min_p_value
pmat = np.asarray(pmat, dtype=np.float32)
pmat_neighb_obj = _PMatNeighbors(filter_shape=filter_shape,
n_largest=n_largest)
pmat_neighb = pmat_neighb_obj.compute(pmat)
pmat_neighb = np.minimum(pmat_neighb, 1. - min_p_value,
out=pmat_neighb)
# in order to avoid doing the same calculation multiple times:
# find all unique sets of values in pmat_neighb
# and store the corresponding indices
# flatten the second and third dimension in order to use np.unique
pmat_neighb = pmat_neighb.reshape(pmat.size, n_largest)
pmat_neighb, pmat_neighb_indices = np.unique(pmat_neighb, axis=0,
return_inverse=True)
# Compute the joint p-value matrix jpvmat
n = l * (1 + 2 * w) - w * (
w + 1) # number of entries covered by kernel
jsf = _JSFUniformOrderStat3D(n=n, d=pmat_neighb.shape[1],
precision=precision,
verbose=self.verbose,
cuda_threads=cuda_threads,
cuda_cwr_loops=cuda_cwr_loops,
tolerance=tolerance)
jpvmat = jsf.compute(u=pmat_neighb)
# restore the original shape using the stored indices
jpvmat = jpvmat[pmat_neighb_indices].reshape(pmat.shape)
return 1. - jpvmat
@staticmethod
def mask_matrices(matrices, thresholds):
"""
Given a list of `matrices` and a list of `thresholds`, return a boolean
matrix `B` ("mask") such that `B[i,j]` is True if each input matrix in
the list strictly exceeds the corresponding threshold at that position.
        If multiple matrices are passed along with only one threshold, the
        same threshold is applied to all matrices.
Parameters
----------
matrices : list of np.ndarray
The matrices which are compared to the respective thresholds to
build the mask. All matrices must have the same shape.
Typically, it is a list `[pmat, jmat]`, i.e., the (cumulative)
probability and joint probability matrices.
thresholds : float or list of float
The significance thresholds for each matrix in `matrices`.
Returns
-------
mask : np.ndarray
Boolean mask matrix with the shape of the input matrices.
Raises
------
ValueError
If `matrices` or `thresholds` is an empty list.
If `matrices` and `thresholds` have different lengths.
See Also
--------
ASSET.probability_matrix_montecarlo : for `pmat` generation
ASSET.probability_matrix_analytical : for `pmat` generation
ASSET.joint_probability_matrix : for `jmat` generation
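        Examples
        --------
        A small sketch on a hypothetical matrix (assuming `ASSET` is
        imported):

        >>> import numpy as np
        >>> mat = np.array([[0.1, 0.9],
        ...                 [0.8, 0.4]])
        >>> ASSET.mask_matrices([mat], thresholds=0.5)
        array([[False,  True],
               [ True, False]])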
"""
if len(matrices) == 0:
raise ValueError("Empty list of matrices")
if isinstance(thresholds, float):
thresholds = np.full(shape=len(matrices), fill_value=thresholds)
if len(matrices) != len(thresholds):
raise ValueError(
'`matrices` and `thresholds` must have same length')
mask = np.ones_like(matrices[0], dtype=bool)
for (mat, thresh) in zip(matrices, thresholds):
mask &= mat > thresh
# Replace nans, coming from False * np.inf, with zeros
mask[np.isnan(mask)] = False
return mask
@staticmethod
def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors,
stretch, working_memory=None):
r"""
Given a matrix `mask_matrix`, replaces its positive elements with
integers representing different cluster IDs. Each cluster comprises
close-by elements.
In ASSET analysis, `mask_matrix` is a thresholded ("masked") version
of the intersection matrix `imat`, whose values are those of `imat`
only if considered statistically significant, and zero otherwise.
A cluster is built by pooling elements according to their distance,
via the DBSCAN algorithm (see `sklearn.cluster.DBSCAN` class). Elements
        form a neighbourhood if at least one of them is within a distance
        `max_distance` of the others and the neighbourhood contains at least
        `min_neighbors` elements. Overlapping neighbourhoods form a cluster:
* Clusters are assigned integers from `1` to the total number `k`
of clusters;
* Unclustered ("isolated") positive elements of `mask_matrix` are
assigned value `-1`;
* Non-positive elements are assigned the value `0`.
The distance between the positions of two positive elements in
`mask_matrix` is given by a Euclidean metric which is stretched if the
        two positions are not aligned along the 45 degree direction (the main
        diagonal direction); the stretching increases with the deviation from
        that direction and is maximal along the anti-diagonal. Specifically,
        the Euclidean distance between positions
`(i1, j1)` and `(i2, j2)` is stretched by a factor
.. math::
1 + (\mathtt{stretch} - 1.) *
\left|\sin((\pi / 4) - \theta)\right|,
        where :math:`\theta` is the angle between the line connecting the two
        pixels and the horizontal axis (:math:`\theta = 45^\circ` corresponds
        to the main-diagonal direction). The stretching factor thus varies
        between 1 and `stretch`.
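        For example, for two elements aligned along the main diagonal
        (:math:`\theta = 45^\circ`) the factor is 1, whereas for two elements
        aligned along the anti-diagonal (:math:`\theta = 135^\circ`) it
        equals `stretch`.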
Parameters
----------
mask_matrix : np.ndarray
The boolean matrix, whose elements with positive values are to be
clustered. The output of :func:`ASSET.mask_matrices`.
max_distance : float
The maximum distance between two elements in `mask_matrix` to be
a part of the same neighbourhood in the DBSCAN algorithm.
min_neighbors : int
The minimum number of elements to form a neighbourhood.
stretch : float
The stretching factor of the euclidean metric for elements aligned
along the 135 degree direction (anti-diagonal). The actual
stretching increases from 1 to `stretch` as the direction of the
two elements moves from the 45 to the 135 degree direction.
`stretch` must be greater than 1.
working_memory : int or None, optional
The sought maximum memory in MiB for temporary distance matrix
chunks. When None (default), no chunking is performed. This
parameter is passed directly to
            the ``sklearn.metrics.pairwise_distances_chunked`` function and
            has no influence on the outcome matrix. Instead, it controls the
            memory vs. speed trade-off.
Default: None
Returns
-------
cluster_mat : np.ndarray
A matrix with the same shape of `mask_matrix`, each of whose
elements is either:
* a positive integer (cluster ID) if the element is part of a
cluster;
* `0` if the corresponding element in `mask_matrix` is
non-positive;
* `-1` if the element does not belong to any cluster.
See Also
--------
sklearn.cluster.DBSCAN
"""
# Don't do anything if mat is identically zero
if np.all(mask_matrix == 0):
return mask_matrix
# List the significant pixels of mat in a 2-columns array
xpos_sgnf, ypos_sgnf = np.where(mask_matrix > 0)
# Compute the matrix D[i, j] of euclidean distances between pixels i
# and j
try:
D = _stretched_metric_2d(
xpos_sgnf, ypos_sgnf, stretch=stretch, ref_angle=45,
working_memory=working_memory
)
except MemoryError as err:
raise MemoryError("Set 'working_memory=100' or another value to "
"chunk the data") from err
# Cluster positions of significant pixels via dbscan
core_samples, config = dbscan(
D, eps=max_distance, min_samples=min_neighbors,
metric='precomputed')
# Construct the clustered matrix, where each element has value
# * i = 1 to k if it belongs to a cluster i,
# * 0 if it is not significant,
# * -1 if it is significant but does not belong to any cluster
cluster_mat = np.zeros_like(mask_matrix, dtype=np.int32)
cluster_mat[xpos_sgnf, ypos_sgnf] = \
config * (config == -1) + (config + 1) * (config >= 0)
return cluster_mat
def extract_synchronous_events(self, cmat, ids=None):
"""
Given a list of spike trains, a bin size, and a clustered
intersection matrix obtained from those spike trains via ASSET
analysis, extracts the sequences of synchronous events (SSEs)
corresponding to clustered elements in the cluster matrix.
Parameters
----------
cmat : (n,n) np.ndarray
The cluster matrix, the output of
:func:`ASSET.cluster_matrix_entries`.
ids : list, optional
A list of spike train IDs. If provided, `ids[i]` is the identity
of `spiketrains[i]`. If None, the IDs `0,1,...,n-1` are used.
Default: None
Returns
-------
sse_dict : dict
A dictionary `D` of SSEs, where each SSE is a sub-dictionary `Dk`,
`k=1,...,K`, where `K` is the max positive integer in `cmat` (i.e.,
the total number of clusters in `cmat`):
.. centered:: D = {1: D1, 2: D2, ..., K: DK}
Each sub-dictionary `Dk` represents the k-th diagonal structure
(i.e., the k-th cluster) in `cmat`, and is of the form
.. centered:: Dk = {(i1, j1): S1, (i2, j2): S2, ..., (iL, jL): SL}.
The keys `(i, j)` represent the positions (time bin IDs) of all
            elements in `cmat` that compose the SSE (i.e., that take value `k`
            and therefore belong to the same cluster), and the values `S` are
sets of neuron IDs representing a repeated synchronous event (i.e.,
spiking at time bins `i` and `j`).
"""
nr_worms = cmat.max() # number of different clusters ("worms") in cmat
if nr_worms <= 0:
return {}
# Compute the transactions associated to the two binnings
tracts_x = _transactions(
self.spiketrains_i, bin_size=self.bin_size, t_start=self.t_start_i,
t_stop=self.t_stop_i,
ids=ids)
if self.spiketrains_j is self.spiketrains_i:
diag_id = 0
tracts_y = tracts_x
else:
if self.is_symmetric():
diag_id = 0
tracts_y = tracts_x
else:
diag_id = None
tracts_y = _transactions(
self.spiketrains_j, bin_size=self.bin_size,
t_start=self.t_start_j, t_stop=self.t_stop_j, ids=ids)
# Reconstruct each worm, link by link
sse_dict = {}
for k in range(1, nr_worms + 1): # for each worm
# worm k is a list of links (each link will be 1 sublist)
worm_k = {}
pos_worm_k = np.array(
np.where(cmat == k)).T # position of all links
# if no link lies on the reference diagonal
if all([y - x != diag_id for (x, y) in pos_worm_k]):
for bin_x, bin_y in pos_worm_k: # for each link
# reconstruct the link
link_l = set(tracts_x[bin_x]).intersection(
tracts_y[bin_y])
# and assign it to its pixel
worm_k[(bin_x, bin_y)] = link_l
sse_dict[k] = worm_k
return sse_dict
def _rate_of_binned_spiketrain(self, binned_spiketrains, kernel_width):
"""
Calculate the rate of binned spiketrains using convolution with
a boxcar kernel.
"""
if self.verbose:
print('compute rates by boxcar-kernel convolution...')
# Create the boxcar kernel and convolve it with the binned spike trains
k = int((kernel_width / self.bin_size).simplified.item())
kernel = np.full(k, fill_value=1. / k)
rate = np.vstack([np.convolve(bst, kernel, mode='same')
for bst in binned_spiketrains])
# The convolution results in an array decreasing at the borders due
# to absence of spikes beyond the borders. Replace the first and last
# (k//2) elements with the (k//2)-th / (n-k//2)-th ones, respectively
k2 = k // 2
for i in range(rate.shape[0]):
rate[i, :k2] = rate[i, k2]
rate[i, -k2:] = rate[i, -k2 - 1]
# Multiply the firing rates by the proper unit
rate = rate * (1. / self.bin_size).rescale('Hz')
return rate
| bsd-3-clause |
xubenben/scikit-learn | sklearn/linear_model/ridge.py | 25 | 39394 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest solver but may not be
          available in old scipy versions. It also uses an iterative procedure.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
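    Examples
    --------
    A small illustrative sketch on toy data (the coefficient is rounded for
    display):

    >>> import numpy as np
    >>> from sklearn.linear_model import ridge_regression
    >>> X = np.array([[0.], [1.], [2.]])
    >>> y = np.array([0., 1., 2.])
    >>> coef = ridge_regression(X, y, alpha=1.0)
    >>> round(float(coef[0]), 3)
    0.833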
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest solver but may not be
          available in old scipy versions. It also uses an iterative procedure.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
        appropriate depending on the matrix X. 'lsqr' uses
        the dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
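    Examples
    --------
    A minimal sketch on toy, linearly separable data:

    >>> import numpy as np
    >>> from sklearn.linear_model import RidgeClassifier
    >>> X = np.array([[-1., -1.], [1., 1.], [2., 2.]])
    >>> y = np.array([0, 1, 1])
    >>> clf = RidgeClassifier(alpha=1.0).fit(X, y)
    >>> clf.predict(np.array([[2., 2.], [-1., -1.]]))
    array([1, 0])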
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
            # these are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
            'auto' : use 'svd' if n_samples > n_features and X is a dense
                     matrix, otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
        `scoring` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
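    Examples
    --------
    A minimal usage sketch; the data below is arbitrary and purely
    illustrative:
    >>> import numpy as np
    >>> from sklearn.linear_model import RidgeCV
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 4.]])
    >>> y = np.array([0., 1., 2., 3.])
    >>> reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    >>> float(reg.alpha_) in (0.1, 1.0, 10.0)
    True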
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
        `scoring` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
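    Examples
    --------
    A minimal usage sketch; the toy data below is arbitrary and purely
    illustrative:
    >>> import numpy as np
    >>> from sklearn.linear_model import RidgeClassifierCV
    >>> X = np.array([[0., 0.], [0., 1.], [2., 2.], [2., 3.]])
    >>> y = np.array([0, 0, 1, 1])
    >>> clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    >>> clf.predict(np.array([[0., 0.5], [2., 2.5]])).tolist()
    [0, 1]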
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |