repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Nik0l/UTemPro | Twitter.py | 1 | 7978 | import pandas as pd
from collections import Counter
import pandas as pd
from sklearn import metrics
import xgboost as xgb
import matplotlib.pyplot as plt
def modelfit(alg, data, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
'''
a variation of:
http://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
'''
if useTrainCV:
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(data['x_train'][predictors], label=data['y_train'])
cvresult = xgb.cv(xgb_param,
xgtrain,
num_boost_round=alg.get_params()['n_estimators'],
nfold=cv_folds,
metrics='auc',
early_stopping_rounds=early_stopping_rounds)
alg.set_params(n_estimators=cvresult.shape[0])
#Fit the algorithm on the data
alg.fit(data['x_train'][predictors], data['y_train'], eval_metric='auc')
#Predict training set:
dtrain_predictions = alg.predict(data['x_train'][predictors])
dtrain_predprob = alg.predict_proba(data['x_train'][predictors])[:,1]
#Print model report:
print ("\nModel Report")
print ("Accuracy : %.4g" % metrics.accuracy_score(data['y_train'].values, dtrain_predictions))
print ("AUC Score (Train): %f" % metrics.roc_auc_score(data['y_train'], dtrain_predprob))
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
feat_imp[0:20].plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show()
return alg
def openFriendDf(path):
df = pd.read_csv(path, delimiter=r"\s+", header=None)
df.columns = ['UserIdFrom', 'UserIdTo']
#df.columns = ['UserId', 'UserIdTo','Timestamp','Type']
return df
def openActivityDf(path):
df = pd.read_csv(path, delimiter=r"\s+", header=None)
df.columns = ['UserId', 'UserIdTo','Timestamp','Type']
df['TweetId'] = df.index
df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='s')
#print df['Timestamp']
print df.shape
return df
def extractTempFeatures(df):
temp_f = []
for index, row in df.iterrows():
t = row['Timestamp']
temp_f.append([t.day, t.hour, t.minute, t.second])
temp_df = pd.DataFrame(temp_f, columns=['day', 'hour', 'minute', 'second'])
temp_df = pd.concat([df['TweetId'], temp_df], axis=1)
print temp_df
return temp_df
def extractNumTweets(df):
freqs_sent = Counter(df['UserId'])
freqs_received = Counter(df['UserIdTo'])
#print freqs
fr_s = pd.DataFrame(freqs_sent.items(), columns=['UserId', 'Tweets_Sent'])
fr_r = pd.DataFrame(freqs_received.items(), columns=['UserId', 'Tweets_Received'])
#print fr_s
#print fr_r
result = pd.merge(fr_s, fr_r, on='UserId', how='outer')
result = result.fillna(0)
#print result
#print result.shape
return result
def extractNumRe(df, a):
freqs_sent = Counter(df['UserId'])
freqs_received = Counter(df['UserIdTo'])
#print freqs
fr_s = pd.DataFrame(freqs_sent.items(), columns=['UserId', a[0]])
fr_r = pd.DataFrame(freqs_received.items(), columns=['UserId', a[1]])
#print fr_s
#print fr_r
result = pd.merge(fr_s, fr_r, on='UserId', how='outer')
result = result.fillna(0)
#print result
#print result.shape
return result
def extractNumFriends(df):
freqs_sent = Counter(df['UserIdFrom'])
freqs_received = Counter(df['UserIdTo'])
#print freqs
fr_s = pd.DataFrame(freqs_sent.items(), columns=['UserId', 'Following'])
fr_r = pd.DataFrame(freqs_received.items(), columns=['UserId', 'Followers'])
#print fr_s
#print fr_r
result = pd.merge(fr_s, fr_r, on='UserId', how='outer')
result = result.fillna(0)
#print result
#print result.shape
return result
def tweetsAggregate(df):
dfa = df[df['Type']=='RE']
a = ['Tweets_Re', 'Tweets_Red']
num_re = extractNumRe(dfa, a)
dfb = df[df['Type']=='RT']
b = ['Tweets_Rt', 'Tweets_Rtd']
num_rt = extractNumRe(dfb, b)
dfc = df[df['Type']=='MT']
c = ['Tweets_Mt', 'Tweets_Mtd']
num_mt = extractNumRe(dfc, c)
result = pd.merge(num_re, num_rt, on='UserId', how='outer')
result = pd.merge(result, num_mt, on='UserId', how='outer')
result = result.fillna(0)
#print result
return result
def makeLabel(df):
label = []
for index, row in df.iterrows():
if row['Type'] == 'RE':
label.append(1)
else:
label.append(0)
labdf = pd.DataFrame(label, columns=['isRetweet'])
labdf = pd.concat([df['TweetId'], labdf], axis=1)
return labdf
def extractFeatures(df1, df2):
#calculate interaction graph features
temp_df = extractTempFeatures(df1)
tweets = tweetsAggregate(df1)
num_mes = extractNumTweets(df1)
interact = pd.merge(tweets, num_mes, on='UserId', how='outer')
print interact
#add temporal features
df1 = pd.merge(df1, temp_df, on='TweetId', how='left')
#calculate friendship graph features
friendship = extractNumFriends(df2)
print friendship
#add interaction graph features
df1 = pd.merge(df1, interact, on='UserId', how='left')
#print interact
#add friendship graph features
result = pd.merge(df1, friendship, on='UserId', how='left')
result = result.fillna(0)
return result
def getDataforML():
filename_activity = 'higgs-activity_time.txt'
filename_social = 'higgs-social_network.edgelist'
#open activity graph
df1 = openActivityDf(filename_activity)
#open a friendship graph
df2 = openFriendDf(filename_social)
#make a label
label = makeLabel(df1)
print label.mean()
#extract features, merge them
result = extractFeatures(df1, df2)
#add label
result = pd.merge(result, label, on='TweetId', how='outer')
#print result
y = result['isRetweet']
X = result.drop('isRetweet', 1)
X = X.drop('Type', 1)
X = X.drop('Timestamp', 1)
print X
ratio = y.value_counts() / float(y.size)
print ('ratio of y: ', ratio)
data = dict(
x_train=X[0:500000],
x_test=X[500001:563068],
y_train=y[0:500000],
y_test=y[500001:563068]
)
print data['y_test'].mean()
data['y_test'].to_csv('test_real.csv')
return data
def predict(data):
xgbm = xgb.XGBClassifier(
learning_rate=0.02,
n_estimators=1500,
max_depth=6,
min_child_weight=1,
gamma=0,
subsample=0.9,
colsample_bytree=0.85,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
features = [x for x in data['x_train'].columns if x not in ['ID']]
alg = modelfit(xgbm, data, features)
dtrain_predprob = alg.predict_proba(data['x_test'][features])[:, 1]
df = pd.DataFrame(dtrain_predprob, columns=['TARGET'])
print (df['TARGET'].mean())
df_res = pd.concat([data['y_test'].astype(int), df], axis=1)
df_res.to_csv('results.csv', index=False)
def mergeFiles():
filename1 = 'test_real.csv'
filename2 = 'results.csv'
df1 = pd.read_csv(filename1)
df1.columns = ['TweetId', 'isRetweet']
df2 = pd.read_csv(filename2)
df2 = df2[0:len(df1)]
df2 = df2.drop('isRetweet',1)
print df1.head()
print df2.head()
print df1.shape
print df2.shape
df = pd.concat([df1, df2], axis=1)
print df.head()
print df.shape
df = df.sort(['TARGET'], ascending=0)
df.to_csv('final.csv')
#data = getDataforML()
#predict(data)
#mergeFiles()
filename_activity = 'higgs-activity_time.txt'
df = pd.read_csv(filename_activity, delimiter=r"\s+", header=None)
df.columns = ['UserId', 'UserIdTo','Timestamp','Type']
df['TweetId'] = df.index
df = df.sort(['Timestamp', 'Type'], ascending=[1,0])
df = df.reset_index(drop=True)
print df
#TODO: remove duplicated rows where RE, RT, or MT occur at the same time
#freqs = Counter(df['Timestamp'])
#repeated = []
#for freq in freqs:
#if freqs[freq] > 1:
#repeated.append(freq)
#print len(repeated)
#df_dropped = df[df['Timestamp'] != repeated]
#print df_dropped
list_del = []
for index, row in df.iterrows():
if index > 0 and df['Timestamp'][index] == df['Timestamp'][prev_index] and df['Type'][index] != 'RE':
list_del.append(index)
prev_index = index
df = df.drop(df.index[list_del])
#print 'need to drop'
print df
| mit |
bert9bert/statsmodels | statsmodels/distributions/mixture_rvs.py | 6 | 10208 | from statsmodels.compat.python import range
import numpy as np
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
---------
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
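# Hypothetical usage sketch (not part of the original module): each row of the
# boolean index returned above has exactly one True entry, and column i is True
# with probability prob[i]. A quick sanity check, assuming numpy is imported:
#
#     idx = _make_index([.75, .25], 10000)
#     idx.shape                      # (10000, 2)
#     idx.sum(axis=0)                # roughly [7500, 2500]
#     (idx.sum(axis=1) == 1).all()   # True -- the columns are mutually exclusive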
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample
class MixtureDistribution(object):
'''univariate mixture distribution
for the simple case for now (unbounded support)
does not yet inherit from scipy.stats.distributions
adds pdf and cdf on top of mixture_rvs, with some restrictions on broadcasting
Currently it does not hold any state; all arguments are included in each method.
'''
#def __init__(self, prob, size, dist, kwargs=None):
def rvs(self, prob, size, dist, kwargs=None):
return mixture_rvs(prob, size, dist, kwargs=kwargs)
def pdf(self, x, prob, dist, kwargs=None):
"""
pdf a mixture of distributions.
Parameters
----------
x : array-like
Array containing locations where the PDF should be evaluated
prob : array-like
Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want the pdf of a mixture of two normals, norm(-1, .5) and
norm(1, .5), where the first component has weight .75 and the second
has weight .25.
>>> import numpy as np
>>> from scipy import stats
>>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
>>> x = np.arange(-4.0, 4.0, 0.01)
>>> prob = [.75,.25]
>>> mixture = MixtureDistribution()
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
x : array-like
Array containing locations where the CDF should be evaluated
prob : array-like
Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want the cdf of a mixture of two normals, norm(-1, .5) and
norm(1, .5), where the first component has weight .75 and the second
has weight .25.
>>> import numpy as np
>>> from scipy import stats
>>> from statsmodels.distributions.mixture_rvs import MixtureDistribution
>>> x = np.arange(-4.0, 4.0, 0.01)
>>> prob = [.75,.25]
>>> mixture = MixtureDistribution()
>>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm],
... kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions instances with callable method rvs.
nvars : int
dimension of the multivariate distribution; could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from a mixture of two multivariate
normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample
if __name__ == '__main__':
from scipy import stats
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
nobs = 10000
mix = MixtureDistribution()
## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))
mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
grid = np.linspace(-4,4, 100)
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.figure()
plt.hist(mrvs, bins=50, normed=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mpdf, lw=2, color='black')
plt.figure()
plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mcdf, lw=2, color='black')
plt.show()
| bsd-3-clause |
hugobowne/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows us to fit multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
delta2323/chainer | chainer/training/extensions/plot_report.py | 3 | 6076 | import json
from os import path
import warnings
import numpy
import six
from chainer import reporter
from chainer import serializer as serializer_module
from chainer.training import extension
from chainer.training import trigger as trigger_module
try:
from matplotlib import pyplot as plot
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
class PlotReport(extension.Extension):
"""Trainer extension to output plots.
This extension accumulates the observations of the trainer to
:class:`~chainer.DictSummary` at a regular interval specified by a supplied
trigger, and plots a graph using them.
There are two triggers to handle this extension. One is the trigger to
invoke this extension, which is used to handle the timing of accumulating
the results. It is set to ``1, 'iteration'`` by default. The other is the
trigger to determine when to emit the result. When this trigger returns
True, this extension appends the summary of accumulated values to the list
of past summaries, and updates the plot written to the output file. Then, this
extension makes a new fresh summary object which is used until the next
time that the trigger fires.
It also adds ``'epoch'`` and ``'iteration'`` entries to each result
dictionary, which are the epoch and iteration counts at the output.
.. warning::
If your environment needs to specify a backend of matplotlib
explicitly, please call ``matplotlib.use`` before importing Chainer.
For example:
.. code-block:: python
import matplotlib
matplotlib.use('Agg')
import chainer
Then, once ``chainer.training.extensions`` is imported,
``matplotlib.use`` will have no effect.
For the details, please see here:
http://matplotlib.org/faq/usage_faq.html#what-is-a-backend
Args:
y_keys (iterable of strs): Keys of values regarded as y. If this is
None, nothing is output to the graph.
x_key (str): Keys of values regarded as x. The default value is
'iteration'.
trigger: Trigger that decides when to aggregate the result and output
the values. This is distinct from the trigger of this extension
itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
'iteration'``, it is passed to :class:`IntervalTrigger`.
postprocess: Callback to postprocess the result dictionaries. Figure
object, Axes object, and all plot data are passed to this callback
in this order. This callback can modify the figure.
file_name (str): Name of the figure file under the output directory.
It can be a format string.
marker (str): The marker used to plot the graph. Default is ``'x'``. If
``None`` is given, it draws with no markers.
grid (bool): Set the axis grid on if True. Default is True.
"""
def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
postprocess=None, file_name='plot.png', marker='x',
grid=True):
_check_available()
self._x_key = x_key
if isinstance(y_keys, str):
y_keys = (y_keys,)
self._y_keys = y_keys
self._trigger = trigger_module.get_trigger(trigger)
self._file_name = file_name
self._marker = marker
self._grid = grid
self._postprocess = postprocess
self._init_summary()
self._data = {k: [] for k in y_keys}
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if not _available:
return
keys = self._y_keys
observation = trainer.observation
summary = self._summary
if keys is None:
summary.add(observation)
else:
summary.add({k: observation[k] for k in keys if k in observation})
if self._trigger(trainer):
stats = self._summary.compute_mean()
stats_cpu = {}
for name, value in six.iteritems(stats):
stats_cpu[name] = float(value) # copy to CPU
updater = trainer.updater
stats_cpu['epoch'] = updater.epoch
stats_cpu['iteration'] = updater.iteration
x = stats_cpu[self._x_key]
data = self._data
for k in keys:
if k in stats_cpu:
data[k].append((x, stats_cpu[k]))
f = plot.figure()
a = f.add_subplot(111)
a.set_xlabel(self._x_key)
if self._grid:
a.grid()
for k in keys:
xy = data[k]
if len(xy) == 0:
continue
xy = numpy.array(xy)
a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)
if a.has_data():
if self._postprocess is not None:
self._postprocess(f, a, summary)
l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f.savefig(path.join(trainer.out, self._file_name),
bbox_extra_artists=(l,), bbox_inches='tight')
plot.close()
self._init_summary()
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
serializer('_plot_{}'.format(self._file_name),
json.dumps(self._data))
else:
self._data = json.loads(
serializer('_plot_{}'.format(self._file_name), ''))
def _init_summary(self):
self._summary = reporter.DictSummary()
| mit |
rajat1994/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor using 299 boosts (300 decision trees) is compared with a single
decision tree regressor. As the number of boosts is increased, the regressor
can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
demianw/dipy | doc/examples/simulate_dki.py | 13 | 5070 | """
==========================
DKI MultiTensor Simulation
==========================
In this example we show how to simulate the diffusion kurtosis imaging (DKI)
data of a single voxel. DKI captures information about the non-Gaussian
properties of water diffusion which is a consequence of the existence of tissue
barriers and compartments. In these simulations compartmental heterogeneity is
taken into account by modeling different compartments for the intra- and
extra-cellular media of two populations of fibers. These simulations are
performed according to [RNH2015]_.
We first import all relevant modules.
"""
import numpy as np
import matplotlib.pyplot as plt
from dipy.sims.voxel import (multi_tensor_dki, single_tensor)
from dipy.data import get_data
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.reconst.dti import (decompose_tensor, from_lower_triangular)
"""
For the simulation we will need a GradientTable with the b-values and
b-vectors. Here we use the GradientTable of the sample Dipy dataset
'small_64D'.
"""
fimg, fbvals, fbvecs = get_data('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
"""
DKI requires data from more than one non-zero b-value. Since the dataset
'small_64D' was acquired with one non-zero b-value, we artificially produce a
second non-zero b-value.
"""
bvals = np.concatenate((bvals, bvals * 2), axis=0)
bvecs = np.concatenate((bvecs, bvecs), axis=0)
"""
The b-values and gradient directions are then converted to Dipy's
GradientTable format.
"""
gtab = gradient_table(bvals, bvecs)
"""
In ``mevals`` we save the eigenvalues of each tensor. To simulate crossing
fibers with two different media (representing intra- and extra-cellular media),
a total of four components have to be taken into account (i.e. the first two
compartments correspond to the intra- and extra-cellular media for the first
fiber population, while the others correspond to the media of the second fiber
population).
"""
mevals = np.array([[0.00099, 0, 0],
[0.00226, 0.00087, 0.00087],
[0.00099, 0, 0],
[0.00226, 0.00087, 0.00087]])
"""
In ``angles`` we save in polar coordinates (:math:`\theta, \phi`) the principal
axis of each compartment tensor. To simulate crossing fibers at 70 degrees
the compartments of the first fiber are aligned to the x-axis while the
compartments of the second fiber are aligned to the x-z plane with an angular
deviation of 70 degrees from the first one.
"""
angles = [(90, 0), (90, 0), (20, 0), (20, 0)]
"""
In ``fractions`` we save the percentage of the contribution of each
compartment, which is computed by multiplying the percentage of contribution
of each fiber population by the water fraction of the corresponding medium.
"""
fie = 0.49 # intra axonal water fraction
fractions = [fie*50, (1 - fie)*50, fie*50, (1 - fie)*50]
"""
Having defined the parameters for all tissue compartments, the elements of the
diffusion tensor (dt), the elements of the kurtosis tensor (kt) and the DW
signals simulated from the DKI model can be obtained using the function
``multi_tensor_dki``.
"""
signal_dki, dt, kt = multi_tensor_dki(gtab, mevals, S0=200, angles=angles,
fractions=fractions, snr=None)
"""
We can also add Rician noise with a specific SNR.
"""
signal_noisy, dt, kt = multi_tensor_dki(gtab, mevals, S0=200,
angles=angles, fractions=fractions,
snr=10)
"""
For comparison purposes, we also compute the DW signal if only the diffusion
tensor components are taken into account. For this we use Dipy's function
single_tensor which requires that dt is decomposed into its eigenvalues and
eigenvectors.
"""
dt_evals, dt_evecs = decompose_tensor(from_lower_triangular(dt))
signal_dti = single_tensor(gtab, S0=200, evals=dt_evals, evecs=dt_evecs,
snr=None)
"""
Finally, we can visualize the values of the different version of simulated
signals for all assumed gradient directions and bvalues.
"""
plt.plot(signal_dti, label='noiseless dti')
plt.plot(signal_dki, label='noiseless dki')
plt.plot(signal_noisy, label='with noise')
plt.legend()
plt.show()
plt.savefig('simulated_dki_signal.png')
"""
.. figure:: simulated_dki_signal.png
:align: center
**Simulated signals obtain from the DTI and DKI models**.
Non-Gaussian diffusion properties in tissues are responsible for smaller signal
attenuations at larger b-values when compared to signal attenuations from free
Gaussian water diffusion. This feature can be seen in the figure above,
since signals simulated from the DKI model show larger DW signal
intensities than the signals obtained only from the diffusion tensor
components.
References:
[RNH2015] R. Neto Henriques et al., "Exploring the 3D geometry of the diffusion
kurtosis tensor - Impact on the development of robust tractography
procedures and novel biomarkers", NeuroImage (2015) 111, 85-99.
"""
| bsd-3-clause |
h2educ/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the points' positions.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
ABcDexter/python-weka-wrapper | setup.py | 2 | 3655 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# setup.py
# Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com)
import os
from setuptools import setup
from urllib2 import urlopen, URLError, HTTPError
def download_file(url, outfile):
"""
Downloads the file associated with the URL and saves it to the specified output file.
Taken from here: http://stackoverflow.com/a/4028894
:param url: the URL to download
:type url: str
:param outfile: the name of the output file
:type outfile: str
:returns: whether the download was successful
:rtype: bool
"""
try:
# Open the url
f = urlopen(url)
print("Downloading '" + url + "' to '" + outfile + "'")
# Open our local file for writing
with open(outfile, "wb") as local_file:
local_file.write(f.read())
# handle errors
except HTTPError, e:
print("HTTP Error: " + str(e.code) + " " + url)
return False
except URLError, e:
print("URL Error: " + str(e.reason) + " " + url)
return False
return True
def download_weka():
"""
Downloads the monolithic Weka jar from sourceforge.net if necessary.
"""
url = "http://sourceforge.net/projects/weka/files/weka-3-7/3.7.12/weka-3-7-12-monolithic.jar/download"
outfile = os.path.join(os.path.dirname(__file__), "python", "weka", "lib", "weka.jar")
if not os.path.exists(outfile):
if not download_file(url, outfile):
print("Failed to download Weka jar '" + url + "' to '" + outfile + "'!")
else:
print("Download of Weka jar successful!")
def ext_modules():
"""
Initiates Weka jar download.
"""
download_weka()
def _read(f):
"""
Reads in the content of the file.
:param f: the file to read
:type f: str
:return: the content
:rtype: str
"""
return open(f, 'rb').read()
setup(
name="python-weka-wrapper",
description="Python wrapper for the Weka Machine Learning Workbench",
long_description=(
_read('DESCRIPTION.rst') + b'\n' +
_read('CHANGES.rst')).decode('utf-8'),
url="https://github.com/fracpete/python-weka-wrapper",
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Programming Language :: Python',
],
license='GNU General Public License version 3.0 (GPLv3)',
package_dir={
'': 'python'
},
packages=[
"weka",
"weka.core",
"weka.plot"
],
package_data={
"weka": ["lib/*.jar"],
},
include_package_data=True,
version="0.3.2",
author='Peter "fracpete" Reutemann',
author_email='pythonwekawrapper at gmail dot com',
install_requires=[
"javabridge>=1.0.11",
"numpy"
],
extras_require={
'plots': ["matplotlib"],
'graphs': ["pygraphviz", "PIL"],
},
ext_modules=ext_modules(),
)
| gpl-3.0 |
KellyChan/Python | python/crawlers/crawler/catalogs/lowes/lowes_catalogs_products_recheck.py | 3 | 2796 | __author__ = "Kelly Chan"
__date__ = "Sept 9 2014"
__version__ = "1.0.0"
import os
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import mechanize
import cookielib
import re
import time
import urllib
import urllib2
from bs4 import BeautifulSoup
import pandas
def openBrowser():
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
#br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
return br
def getSoup(br, url):
# Open url
r = br.open(url)
html = r.read()
soup = BeautifulSoup(html)
return soup
def loadURLs(dataFile):
urls = []
with open(dataFile, 'rb') as f:
for line in f.readlines():
urls.append(line.strip())
return urls
def getPages(soup):
results = soup.find('span', attrs={'class': 'totalPages'})
if results:
pages = int(results.get_text())
else:
pages = 0
return pages
def filterRE(results, pattern):
return re.findall(re.compile(pattern), str(results))
def getProducts(content):
products = []
links = []
results = content.find_all('h3', attrs={'class': 'productTitle'})
#print results
#pattern1 = r"<a href=.*>([\d\w\u4e00-\u9fa5\s].*)</a>"
pattern1 = r"<a href=.*>(.*)</a>"
results1 = filterRE(results, pattern1)
for result in results1:
products.append(result.strip())
#print products
pattern2 = r'<a href="(.*)" name=.*>'
results2 = filterRE(results, pattern2)
for result in results2:
links.append(result.strip())
#print links
return products, links
def main():
outPath = "G:/vimFiles/freelance/20140903-eCatalog/src/outputs/"
#fileName = "error-one-page.csv"
fileName = "error-more-pages.csv"
br = openBrowser()
urls = loadURLs(outPath+fileName)
for url in urls:
soup = getSoup(br, url)
content = soup.find('ul', attrs={'id': 'productResults'})
products, links = getProducts(content)
if len(products) == len(links):
data = pandas.DataFrame({'product': products, 'prodURL': links})
data['deptURL'] = url
with open(outPath+"products-error-more-pages.csv", 'a') as f:
data.to_csv(f, header=False)
else:
print url
if __name__ == '__main__':
main()
| mit |
ktaneishi/deepchem | contrib/one_shot_models/multitask_classifier.py | 5 | 10016 | """
Implements a multitask graph convolutional classifier.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import os
import sys
import numpy as np
import tensorflow as tf
import sklearn.metrics
import tempfile
from deepchem.data import pad_features
from deepchem.utils.save import log
from deepchem.models import Model
from deepchem.nn.copy import Input
from deepchem.nn.copy import Dense
from deepchem.nn import model_ops
# TODO(rbharath): Find a way to get rid of this import?
from deepchem.models.tf_new_models.graph_topology import merge_dicts
def get_loss_fn(final_loss):
# Obtain appropriate loss function
if final_loss == 'L2':
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(tf.square(diff), 0)
elif final_loss == 'weighted_L2':
def loss_fn(x, t, w):
diff = tf.subtract(x, t)
weighted_diff = tf.multiply(diff, w)
return tf.reduce_sum(tf.square(weighted_diff), 0)
elif final_loss == 'L1':
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(tf.abs(diff), 0)
elif final_loss == 'huber':
def loss_fn(x, t):
diff = tf.subtract(x, t)
return tf.reduce_sum(
tf.minimum(0.5 * tf.square(diff),
huber_d * (tf.abs(diff) - 0.5 * huber_d)), 0)
elif final_loss == 'cross_entropy':
def loss_fn(x, t, w):
costs = tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=t)
weighted_costs = tf.multiply(costs, w)
return tf.reduce_sum(weighted_costs)
elif final_loss == 'hinge':
def loss_fn(x, t, w):
t = tf.multiply(2.0, t) - 1
costs = tf.maximum(0.0, 1.0 - tf.multiply(t, x))
weighted_costs = tf.multiply(costs, w)
return tf.reduce_sum(weighted_costs)
return loss_fn
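# Hypothetical usage sketch (not part of the original module): the weighted
# variants returned by get_loss_fn ('weighted_L2', 'cross_entropy', 'hinge')
# take (predictions, targets, weights); the others take (predictions, targets).
# The tensor names below are placeholders.
#
#     loss_fn = get_loss_fn('cross_entropy')
#     task_loss = loss_fn(task_logits, task_labels, task_weights)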
class MultitaskGraphClassifier(Model):
def __init__(self,
model,
n_tasks,
n_feat,
logdir=None,
batch_size=50,
final_loss='cross_entropy',
learning_rate=.001,
optimizer_type="adam",
learning_rate_decay_time=1000,
beta1=.9,
beta2=.999,
pad_batches=True,
verbose=True):
warnings.warn(
"MultitaskGraphClassifier is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
super(MultitaskGraphClassifier, self).__init__(
model_dir=logdir, verbose=verbose)
self.n_tasks = n_tasks
self.final_loss = final_loss
self.model = model
self.sess = tf.Session(graph=self.model.graph)
with self.model.graph.as_default():
# Extract model info
self.batch_size = batch_size
self.pad_batches = pad_batches
# Get graph topology for x
self.graph_topology = self.model.get_graph_topology()
self.feat_dim = n_feat
# Raw logit outputs
self.logits = self.build()
self.loss_op = self.add_training_loss(self.final_loss, self.logits)
self.outputs = self.add_softmax(self.logits)
self.learning_rate = learning_rate
self.T = learning_rate_decay_time
self.optimizer_type = optimizer_type
self.optimizer_beta1 = beta1
self.optimizer_beta2 = beta2
# Set epsilon
self.epsilon = 1e-7
self.add_optimizer()
# Initialize
self.init_fn = tf.global_variables_initializer()
self.sess.run(self.init_fn)
# Path to save checkpoint files, which matches the
# replicated supervisor's default path.
self._save_path = os.path.join(self.model_dir, 'model.ckpt')
def build(self):
# Create target inputs
self.label_placeholder = tf.placeholder(
dtype='bool', shape=(None, self.n_tasks), name="label_placeholder")
self.weight_placeholder = tf.placeholder(
dtype='float32', shape=(None, self.n_tasks), name="weight_placholder")
feat = self.model.return_outputs()
################################################################ DEBUG
#print("multitask classifier")
#print("feat")
#print(feat)
################################################################ DEBUG
output = model_ops.multitask_logits(feat, self.n_tasks)
return output
def add_optimizer(self):
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(
self.learning_rate,
beta1=self.optimizer_beta1,
beta2=self.optimizer_beta2,
epsilon=self.epsilon)
else:
raise ValueError("Optimizer type not recognized.")
# Get train function
self.train_op = self.optimizer.minimize(self.loss_op)
def construct_feed_dict(self, X_b, y_b=None, w_b=None, training=True):
"""Get initial information about task normalization"""
# TODO(rbharath): I believe this is total amount of data
n_samples = len(X_b)
if y_b is None:
y_b = np.zeros((n_samples, self.n_tasks))
if w_b is None:
w_b = np.zeros((n_samples, self.n_tasks))
targets_dict = {self.label_placeholder: y_b, self.weight_placeholder: w_b}
# Get graph information
atoms_dict = self.graph_topology.batch_to_feed_dict(X_b)
# TODO (hraut->rhbarath): num_datapoints should be a vector, with ith element being
# the number of labeled data points in target_i. This is to normalize each task
# num_dat_dict = {self.num_datapoints_placeholder : self.}
# Get other optimizer information
# TODO(rbharath): Figure out how to handle phase appropriately
feed_dict = merge_dicts([targets_dict, atoms_dict])
return feed_dict
def add_training_loss(self, final_loss, logits):
"""Computes loss using logits."""
loss_fn = get_loss_fn(final_loss) # Get loss function
task_losses = []
# label_placeholder of shape (batch_size, n_tasks). Split into n_tasks
# tensors of shape (batch_size,)
task_labels = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.label_placeholder)
task_weights = tf.split(
axis=1, num_or_size_splits=self.n_tasks, value=self.weight_placeholder)
for task in range(self.n_tasks):
task_label_vector = task_labels[task]
task_weight_vector = task_weights[task]
# Convert the labels into one-hot vector encodings.
one_hot_labels = tf.cast(
tf.one_hot(tf.cast(tf.squeeze(task_label_vector), tf.int32), 2),
tf.float32)
# Since we use tf.nn.softmax_cross_entropy_with_logits note that we pass in
# un-softmaxed logits rather than softmax outputs.
task_loss = loss_fn(logits[task], one_hot_labels, task_weight_vector)
task_losses.append(task_loss)
# It's ok to divide by just the batch_size rather than the number of nonzero
# examples (effect averages out)
total_loss = tf.add_n(task_losses)
total_loss = tf.math.divide(total_loss, self.batch_size)
return total_loss
def add_softmax(self, outputs):
"""Replace logits with softmax outputs."""
softmax = []
with tf.name_scope('inference'):
for i, logits in enumerate(outputs):
softmax.append(tf.nn.softmax(logits, name='softmax_%d' % i))
return softmax
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
# Perform the optimization
log("Training for %d epochs" % nb_epoch, self.verbose)
# TODO(rbharath): Disabling saving for now to try to debug.
for epoch in range(nb_epoch):
log("Starting epoch %d" % epoch, self.verbose)
for batch_num, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(self.batch_size, pad_batches=self.pad_batches)):
if batch_num % log_every_N_batches == 0:
log("On batch %d" % batch_num, self.verbose)
self.sess.run(
self.train_op, feed_dict=self.construct_feed_dict(X_b, y_b, w_b))
def save(self):
"""
No-op since this model doesn't currently support saving...
"""
pass
def predict(self, dataset, transformers=[], **kwargs):
"""Wraps predict to set batch_size/padding."""
return super(MultitaskGraphClassifier, self).predict(
dataset, transformers, batch_size=self.batch_size)
def predict_proba(self, dataset, transformers=[], n_classes=2, **kwargs):
"""Wraps predict_proba to set batch_size/padding."""
return super(MultitaskGraphClassifier, self).predict_proba(
dataset, transformers, n_classes=n_classes, batch_size=self.batch_size)
def predict_on_batch(self, X):
"""Return model output for the provided input.
"""
if self.pad_batches:
X = pad_features(self.batch_size, X)
# run eval data through the model
n_tasks = self.n_tasks
with self.sess.as_default():
feed_dict = self.construct_feed_dict(X)
# Shape (n_samples, n_tasks)
batch_outputs = self.sess.run(self.outputs, feed_dict=feed_dict)
n_samples = len(X)
outputs = np.zeros((n_samples, self.n_tasks))
for task, output in enumerate(batch_outputs):
outputs[:, task] = np.argmax(output, axis=1)
return outputs
def predict_proba_on_batch(self, X, n_classes=2):
"""Returns class probabilities on batch"""
# run eval data through the model
if self.pad_batches:
X = pad_features(self.batch_size, X)
n_tasks = self.n_tasks
with self.sess.as_default():
feed_dict = self.construct_feed_dict(X)
batch_outputs = self.sess.run(self.outputs, feed_dict=feed_dict)
n_samples = len(X)
outputs = np.zeros((n_samples, self.n_tasks, n_classes))
for task, output in enumerate(batch_outputs):
outputs[:, task, :] = output
return outputs
def get_num_tasks(self):
"""Needed to use Model.predict() from superclass."""
return self.n_tasks
| mit |
shennjia/weblte | matplot/test_subframe.py | 1 | 6044 | __author__ = 'shenojia'
import pprint
import re
import sys
import unittest
import weakref
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
sys.path.insert(0, '..')
from R12_36211.RE import RE
from R12_36211.RG import RG
from R12_36211.PRB import PRB
from R12_36211.SUBFRAME import SUBFRAME
from R12_36211.CRS import CRS
from R12_36211.PBCH import PBCH
from R12_36xxx.HighLayer import conf
from R12_36xxx.ConstDefinition import *
from R12_36xxx.TypesDefition import *
class Test_SUBFRAME(unittest.TestCase):
"""
test different channels and signals
"""
@unittest.skip("test_subframeConstruct")
def test_subframeConstruct(self):
"""
new a subframe
:return:
"""
sf = SUBFRAME(0,0, SubFrameType('D'))
crs = sf.crses[0]
print(crs.res[0]().t)
print(crs.res[0]().p)
print(crs.res[0]().a)
pass
#@unittest.skip("test_plotPcfichInSubframe")
def test_plotPcfichInSubframe(self):
"""
pcfich plot
"""
fig = plt.figure(figsize=(8,6), dpi=80)
sf = SUBFRAME(0, 0, SubFrameType('D'))
gs = gridspec.GridSpec(1,2,left=0.1,right=0.9,wspace=0.01)
ax0 = plt.subplot(gs[0,0])
ax1 = plt.subplot(gs[0,1])
ax0.add_patch(sf.slots[0].patch)
ax1.add_patch(sf.slots[1].patch)
sf.pcfich.patch.set_facecolor((0.5, 1.0, 1.0, 0.5))
if sf.pcfich.n__s %2 == 0:
ax0.add_patch(sf.pcfich.patch)
else:
ax1.add_patch(sf.pcfich.patch)
ax0.autoscale_view()
ax1.autoscale_view()
plt.show()
@unittest.skip("test_plotPbchInSubframe")
def test_plotPbchInSubframe(self):
"""
pbch plot
"""
fig = plt.figure()
sf = SUBFRAME(0, 0, SubFrameType('D'))
gs = gridspec.GridSpec(1,2,left=0.1,right=0.9,wspace=0.01)
ax0 = plt.subplot(gs[0,0])
ax1 = plt.subplot(gs[0,1])
ax0.add_patch(sf.slots[0].patch)
ax1.add_patch(sf.slots[1].patch)
sf.pbch.patch.set_facecolor((0.0, 1.0, 1.0, 0.5))
if sf.pbch.n__s %2 == 0:
ax0.add_patch(sf.pbch.patch)
else:
ax1.add_patch(sf.pbch.patch)
ax0.autoscale_view()
ax1.autoscale_view()
plt.show()
@unittest.skip("test_plotCrsInSubframe")
def test_plotCrsInSubframe(self):
"""
Make a compound path --
Use CLOSEPOLY and MOVETO for the different parts of
the compound path
"""
fig = plt.figure()
sf = SUBFRAME(0,0, SubFrameType('D'))
gs = gridspec.GridSpec(1,2,left=0.1,right=0.9,wspace=0.01)
ax0 = plt.subplot(gs[0,0])
ax1 = plt.subplot(gs[0,1])
ax0.add_patch(sf.slots[0].patch)
crs00 = sf.crses[(sf.slots[0].n__s,0)]
crs00.patch.set_facecolor((0.0, 1.0, 1.0, 0.5))
ax0.add_patch(crs00.patch)
crs01 = sf.crses[(sf.slots[0].n__s,1)]
crs01.patch.set_facecolor((1.0, 1.0, 0.0, 0.5))
ax0.add_patch(crs01.patch)
crs02 = sf.crses[(sf.slots[0].n__s,2)]
crs02.patch.set_facecolor((0.6, 1.0, 0.6, 0.5))
ax0.add_patch(crs02.patch)
crs03 = sf.crses[(sf.slots[0].n__s,3)]
crs03.patch.set_facecolor((1.0, 0.8, 0.8, 0.5))
ax0.add_patch(crs03.patch)
ax1.add_patch(sf.slots[1].patch)
crs10 = sf.crses[(sf.slots[1].n__s,0)]
crs10.patch.set_facecolor((0.0, 1.0, 1.0, 0.5))
ax1.add_patch(crs10.patch)
crs11 = sf.crses[(sf.slots[1].n__s,1)]
crs11.patch.set_facecolor((1.0, 1.0, 0.0, 0.5))
ax1.add_patch(crs11.patch)
crs12 = sf.crses[(sf.slots[1].n__s,2)]
crs12.patch.set_facecolor((0.6, 1.0, 0.6, 0.5))
ax1.add_patch(crs12.patch)
crs13 = sf.crses[(sf.slots[1].n__s,3)]
crs13.patch.set_facecolor((1.0, 0.8, 0.8, 0.5))
ax1.add_patch(crs13.patch)
ax0.autoscale_view()
ax1.autoscale_view()
plt.show()
@unittest.skip("test_plotCrsInSubframe2")
def test_plotCrsInSubframe2(self):
"""
crs test on a special frame
"""
fig = plt.figure()
#TODO: high-level parameter verification should be done in a more elegant way; avoid too much misc checking inside,
#so the user shall ensure all high-level parameter settings are correct first
sf = SUBFRAME(0,1, SubFrameType('S'))
gs = gridspec.GridSpec(1,2,left=0.1,right=0.9,wspace=0.01)
ax0 = plt.subplot(gs[0,0])
ax1 = plt.subplot(gs[0,1])
ax0.add_patch(sf.slots[0].patch)
crs00 = sf.crses[(sf.slots[0].n__s,0)]
crs00.patch.set_facecolor((0.0, 1.0, 1.0, 0.5))
ax0.add_patch(crs00.patch)
crs01 = sf.crses[(sf.slots[0].n__s,1)]
crs01.patch.set_facecolor((1.0, 1.0, 0.0, 0.5))
ax0.add_patch(crs01.patch)
crs02 = sf.crses[(sf.slots[0].n__s,2)]
crs02.patch.set_facecolor((0.6, 1.0, 0.6, 0.5))
ax0.add_patch(crs02.patch)
crs03 = sf.crses[(sf.slots[0].n__s,3)]
crs03.patch.set_facecolor((1.0, 0.8, 0.8, 0.5))
ax0.add_patch(crs03.patch)
ax1.add_patch(sf.slots[1].patch)
crs10 = sf.crses[(sf.slots[1].n__s,0)]
crs10.patch.set_facecolor((0.0, 1.0, 1.0, 0.5))
ax1.add_patch(crs10.patch)
crs11 = sf.crses[(sf.slots[1].n__s,1)]
crs11.patch.set_facecolor((1.0, 1.0, 0.0, 0.5))
ax1.add_patch(crs11.patch)
crs12 = sf.crses[(sf.slots[1].n__s,2)]
crs12.patch.set_facecolor((0.6, 1.0, 0.6, 0.5))
ax1.add_patch(crs12.patch)
crs13 = sf.crses[(sf.slots[1].n__s,3)]
crs13.patch.set_facecolor((1.0, 0.8, 0.8, 0.5))
ax1.add_patch(crs13.patch)
ax1.add_patch(sf.slots[1].patch)
ax0.autoscale_view()
ax1.autoscale_view()
plt.show()
if __name__ == '__main__':
unittest.main() | mit |
IAmWave/blekota | src/visual.py | 1 | 2059 | import numpy as np
import matplotlib.pyplot as plt
import const
def compare(y, y2, fs=const.DEFAULT_FS):
"""Take a ground truth and predicted sounds and show a plot comparing them. Generate a graph of how the sounds differ over time.
Args:
y (array): The ground truth sound.
y2 (array): The predicted sound.
fs (int, optional): Sampling frequency.
"""
x = np.linspace(0, y.size / fs, y.size)
plt.figure()
plt.subplot(211)
plt.title('Waveform comparison')
plt.plot(x, y2, 'r', linewidth=4.0)
plt.plot(x, y, 'k--', linewidth=2.0)
plt.xlim(0, 0.05)
plt.xlabel('Time')
plt.ylabel('Sound wave')
running_average_n = 100 # Smooth out the error using a running average
def running_average(arr):
return np.convolve(arr, np.ones((running_average_n,)) / running_average_n, mode='valid')
err = running_average((y - y2) ** 2)
err_bad = running_average(y ** 2) # The error if y2 was a constant 0, for comparison
plt.subplot(212)
plt.title('Square error over time')
plt.plot(err[::running_average_n], 'r')
plt.plot(err_bad[::running_average_n], 'k')
plt.axhline(0, color='black')
plt.xlabel('Time')
plt.ylabel('Square error')
plt.show()
def show(y, fs=const.DEFAULT_FS):
"""Plot a sound.
Args:
y (array): The sound to plot.
fs (int, optional): Sampling frequency.
"""
x = np.linspace(0, y.size / fs, y.size)
plt.figure()
plt.title('Waveform')
plt.axhline(0, color='black')
plt.plot(x, y, 'r', linewidth=2.0)
plt.xlim(0, 0.05)
plt.xlabel('Time')
plt.ylabel('Sound wave')
plt.show()
def heatmap(p, fs=const.DEFAULT_FS):
"""Plot a probability heatmap generated by a model.
Args:
p (array of shape [number of samples, 256]): The probabilities of each sample's values.
fs (int, optional): Sampling frequency.
"""
plt.figure()
plt.title('Probability heatmap')
plt.imshow(p.T, cmap='hot', interpolation='nearest')
plt.show()
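# Hypothetical usage sketch (not part of the original module): compare a clean
# sine wave with a noisy copy of itself. The signal parameters are arbitrary.
#
#     t = np.linspace(0, 1, const.DEFAULT_FS)
#     y_true = np.sin(2 * np.pi * 440 * t)
#     y_pred = y_true + 0.05 * np.random.randn(t.size)
#     show(y_true)
#     compare(y_true, y_pred)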
| mit |
ky822/scikit-learn | sklearn/mixture/gmm.py | 68 | 31091 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
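# Illustrative sketch (added, not part of the original module): drawing a few
# samples from a single full-covariance Gaussian with the helper above. The
# mean and covariance values are arbitrary; the function is never called at
# import time.
def _demo_sample_gaussian():
    mean = np.array([0., 10.])
    covar = np.array([[1.0, 0.3],
                      [0.3, 2.0]])            # symmetric, positive-definite
    X = sample_gaussian(mean, covar, covariance_type='full', n_samples=5,
                        random_state=0)
    assert X.shape == (2, 5)                  # (n_features, n_samples), per docstring
    return X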
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with a low number of iterations the prediction may not be 100% accurate.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
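    # Worked example (added for illustration, not part of the original class):
    # for a 'diag' model with n_components=3 and 2 features this returns
    # 3*2 covariance terms + 3*2 means + (3 - 1) free weights = 14.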
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
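# Illustrative sketch (added, not part of the original module): bic() and aic()
# above implement the usual criteria BIC = -2*logL + p*ln(n) and
# AIC = -2*logL + 2*p, with p = _n_parameters() and logL the summed
# log-likelihood of X. A hypothetical model-selection loop over the number of
# components could look like this (X is assumed to be an
# (n_samples, n_features) array; the function is never called here):
def _demo_select_n_components(X, max_components=5):
    best_gmm, best_bic = None, np.inf
    for k in range(1, max_components + 1):
        candidate = GMM(n_components=k, covariance_type='diag').fit(X)
        bic = candidate.bic(X)
        if bic < best_bic:
            best_gmm, best_bic = candidate, bic
    return best_gmm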
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
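# Illustrative check (added, not part of the original module): the vectorised
# expression above equals sum_d -0.5*(log(2*pi*var_d) + (x_d - mu_d)**2/var_d)
# per component. A hypothetical verification against scipy.stats for the first
# component; defined only, never run at import time.
def _demo_check_diag_density():
    from scipy.stats import multivariate_normal
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    means = rng.randn(2, 3)
    covars = rng.rand(2, 3) + 0.5             # strictly positive diagonal variances
    lpr = _log_multivariate_normal_density_diag(X, means, covars)
    ref = multivariate_normal(mean=means[0], cov=np.diag(covars[0])).logpdf(X)
    assert np.allclose(lpr[:, 0], ref)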
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
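# Illustrative sketch (added, not part of the original module): shapes produced
# by the helper above from a single (n_features, n_features) template, matching
# the conventions documented on the GMM class. Defined only, never called here.
def _demo_distribute_covar_shapes():
    tied_cv = np.eye(3)
    dist = distribute_covar_matrix_to_match_covariance_type
    assert dist(tied_cv, 'spherical', 4).shape == (4, 3)
    assert dist(tied_cv, 'tied', 4).shape == (3, 3)
    assert dist(tied_cv, 'diag', 4).shape == (4, 3)
    assert dist(tied_cv, 'full', 4).shape == (4, 3, 3)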
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
amolkahat/pandas | pandas/tests/indexing/test_floats.py | 2 | 28488 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import numpy as np
from pandas import (Series, DataFrame, Index, Float64Index, Int64Index,
RangeIndex)
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestFloatIndexers(object):
def check(self, result, original, indexer, getitem):
"""
Comparator for results.
We need to take care whether we are indexing on a
Series or a DataFrame.
"""
if isinstance(original, Series):
expected = original.iloc[indexer]
else:
if getitem:
expected = original.iloc[:, indexer]
else:
expected = original.iloc[indexer]
assert_almost_equal(result, expected)
def test_scalar_error(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
# but is specifically testing for the error
# message
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex, tm.makeIntIndex,
tm.makeRangeIndex]:
i = index(5)
s = Series(np.arange(len(i)), index=i)
def f():
s.iloc[3.0]
tm.assert_raises_regex(TypeError,
'Cannot index by location index',
f)
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
@ignore_ix
def test_scalar_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
i = index(5)
for s in [Series(
np.arange(len(i)), index=i), DataFrame(
np.random.randn(
len(i), len(i)), index=i, columns=i)]:
# getting
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.iloc, False),
(lambda x: x, True)]:
def f():
with catch_warnings(record=True):
idxr(s)[3.0]
# getitem on a DataFrame is a KeyError as it is indexing
# via labels on the columns
if getitem and isinstance(s, DataFrame):
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# label based can be a TypeError or KeyError
def f():
s.loc[3.0]
if s.index.inferred_type in ['string', 'unicode', 'mixed']:
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# contains
assert 3.0 not in s
# setting with a float fails with iloc
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
# setting with an indexer
if s.index.inferred_type in ['categorical']:
# Value or Type Error
pass
elif s.index.inferred_type in ['datetime64', 'timedelta64',
'period']:
# these should prob work
# and are inconsistent between series/dataframe ATM
# for idxr in [lambda x: x.ix,
# lambda x: x]:
# s2 = s.copy()
# def f():
# idxr(s2)[3.0] = 0
# pytest.raises(TypeError, f)
pass
else:
s2 = s.copy()
s2.loc[3.0] = 10
assert s2.index.is_object()
for idxr in [lambda x: x.ix,
lambda x: x]:
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 0
assert s2.index.is_object()
# fallsback to position selection, series only
s = Series(np.arange(len(i)), index=i)
s[3]
pytest.raises(TypeError, lambda: s[3.0])
@ignore_ix
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
s3 = Series([1, 2, 3], index=['a', 'b', 1.5])
# lookup in a pure string index
# with an invalid indexer
for idxr in [lambda x: x.ix,
lambda x: x,
lambda x: x.iloc]:
def f():
with catch_warnings(record=True):
idxr(s2)[1.0]
pytest.raises(TypeError, f)
pytest.raises(KeyError, lambda: s2.loc[1.0])
result = s2.loc['b']
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x]:
def f():
idxr(s3)[1.0]
pytest.raises(TypeError, f)
result = idxr(s3)[1]
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x.ix]:
with catch_warnings(record=True):
def f():
idxr(s3)[1.0]
pytest.raises(TypeError, f)
result = idxr(s3)[1]
expected = 2
assert result == expected
pytest.raises(TypeError, lambda: s3.iloc[1.0])
pytest.raises(KeyError, lambda: s3.loc[1.0])
result = s3.loc[1.5]
expected = 3
assert result == expected
@ignore_ix
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
# integer index
for i in [Int64Index(range(5)), RangeIndex(5)]:
for s in [Series(np.arange(len(i))),
DataFrame(np.random.randn(len(i), len(i)),
index=i, columns=i)]:
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
with catch_warnings(record=True):
result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
if isinstance(s, Series):
def compare(x, y):
assert x == y
expected = 100
else:
compare = tm.assert_series_equal
if getitem:
expected = Series(100,
index=range(len(s)), name=3)
else:
expected = Series(100.,
index=range(len(s)), name=3)
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 100
result = idxr(s2)[3.0]
compare(result, expected)
result = idxr(s2)[3]
compare(result, expected)
# contains
# coerce to equal int
assert 3.0 in s
@ignore_ix
def test_scalar_float(self):
# scalar float indexers work on a float index
index = Index(np.arange(5.))
for s in [Series(np.arange(len(index)), index=index),
DataFrame(np.random.randn(len(index), len(index)),
index=index, columns=index)]:
# assert all operations except for iloc are ok
indexer = index[3]
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
# getting
result = idxr(s)[indexer]
self.check(result, s, 3, getitem)
# setting
s2 = s.copy()
def f():
with catch_warnings(record=True):
idxr(s2)[indexer] = expected
with catch_warnings(record=True):
result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# random integer is a KeyError
with catch_warnings(record=True):
pytest.raises(KeyError, lambda: idxr(s)[3.5])
# contains
assert 3.0 in s
# iloc succeeds with an integer
expected = s.iloc[3]
s2 = s.copy()
s2.iloc[3] = expected
result = s2.iloc[3]
self.check(result, s, 3, False)
# iloc raises with a float
pytest.raises(TypeError, lambda: s.iloc[3.0])
def g():
s2.iloc[3.0] = 0
pytest.raises(TypeError, g)
@ignore_ix
def test_slice_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
index = index(5)
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l]
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l] = 0
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l] = 0
pytest.raises(TypeError, f)
@ignore_ix
def test_slice_integer(self):
# same as above, but for Integer based indexes
# these coerce to a like integer
# oob indicates if we are out of bounds
# of positional indexing
for index, oob in [(Int64Index(range(5)), False),
(RangeIndex(5), False),
(Int64Index(range(5)) + 10, True)]:
# s is an in-range index
s = Series(range(5), index=index)
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(3, 5)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-6, 6),
slice(-6.0, 6.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(-6, 6)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[slice(-6.0, 6.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res1 in [(slice(2.5, 4), slice(3, 5)),
(slice(2, 3.5), slice(2, 4)),
(slice(2.5, 3.5), slice(3, 4))]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
res = res1
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
sc = s.copy()
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
def test_integer_positional_indexing(self):
""" make sure that we are raising on positional indexing
w.r.t. an integer index """
s = Series(range(2, 6), index=range(2, 6))
result = s[2:4]
expected = s.iloc[2:4]
assert_series_equal(result, expected)
for idxr in [lambda x: x,
lambda x: x.iloc]:
for l in [slice(2, 4.0),
slice(2.0, 4),
slice(2.0, 4.0)]:
def f():
idxr(s)[l]
pytest.raises(TypeError, f)
@ignore_ix
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
for index in [Int64Index(range(5)), RangeIndex(5)]:
s = DataFrame(np.random.randn(5, 2), index=index)
def f(idxr):
# getitem
for l in [slice(0.0, 1),
slice(0, 1.0),
slice(0.0, 1.0)]:
result = idxr(s)[l]
indexer = slice(0, 2)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-10, 10),
slice(-10.0, 10.0)]:
result = idxr(s)[l]
self.check(result, s, slice(-10, 10), True)
# positional indexing
def f():
s[slice(-10.0, 10.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res in [(slice(0.5, 1), slice(1, 2)),
(slice(0, 0.5), slice(0, 1)),
(slice(0.5, 1.5), slice(1, 2))]:
result = idxr(s)[l]
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
f(lambda x: x.loc)
with catch_warnings(record=True):
f(lambda x: x.ix)
@ignore_ix
def test_slice_float(self):
# same as above, but for floats
index = Index(np.arange(5.)) + 0.1
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
expected = s.iloc[3:4]
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
# getitem
with catch_warnings(record=True):
result = idxr(s)[l]
if isinstance(s, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
# setitem
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
assert (result == 0).all()
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5), index=index)
assert s[3] == 2
assert s.loc[3] == 2
assert s.loc[3] == 2
assert s.iloc[3] == 3
def test_floating_misc(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.loc[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.loc[5.0]
assert result1 == result2
assert result1 == result3
result1 = s[5]
result2 = s.loc[5]
result3 = s.loc[5]
assert result1 == result2
assert result1 == result3
assert s[5.0] == s[5]
# value not found (and no fallback at all)
# scalar integers
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s[4])
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype='int64'))
for fancy_idx in [[5, 0], np.array([5, 0])]: # int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.loc[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0, 5, 10]]
result2 = s.loc[[0.0, 5, 10]]
result3 = s.loc[[0.0, 5, 10]]
result4 = s.iloc[[0, 2, 4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[np.nan, 2, 4], index=[1.6, 5, 10]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[0.0, np.nan, np.nan], index=[0, 1, 2]))
result1 = s.loc[[2.5, 5]]
result2 = s.loc[[2.5, 5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))
result1 = s[[2.5]]
result2 = s.loc[[2.5]]
result3 = s.loc[[2.5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([1], index=[2.5]))
def test_floating_tuples(self):
# see gh-13509
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name='foo')
result = s[0.0]
assert result == (1, 1)
expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name='foo')
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name='foo')
result = s[0.0]
tm.assert_series_equal(result, expected)
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {256: 2321.0,
1: 78.0,
2: 2716.0,
3: 0.0,
4: 369.0,
5: 0.0,
6: 269.0,
7: 0.0,
8: 0.0,
9: 0.0,
10: 3536.0,
11: 0.0,
12: 24.0,
13: 0.0,
14: 931.0,
15: 0.0,
16: 101.0,
17: 78.0,
18: 9643.0,
19: 0.0,
20: 0.0,
21: 0.0,
22: 63761.0,
23: 0.0,
24: 446.0,
25: 0.0,
26: 34773.0,
27: 0.0,
28: 729.0,
29: 78.0,
30: 0.0,
31: 0.0,
32: 3374.0,
33: 0.0,
34: 1391.0,
35: 0.0,
36: 361.0,
37: 0.0,
38: 61808.0,
39: 0.0,
40: 0.0,
41: 0.0,
42: 6677.0,
43: 0.0,
44: 802.0,
45: 0.0,
46: 2691.0,
47: 0.0,
48: 3582.0,
49: 0.0,
50: 734.0,
51: 0.0,
52: 627.0,
53: 70.0,
54: 2584.0,
55: 0.0,
56: 324.0,
57: 0.0,
58: 605.0,
59: 0.0,
60: 0.0,
61: 0.0,
62: 3989.0,
63: 10.0,
64: 42.0,
65: 0.0,
66: 904.0,
67: 0.0,
68: 88.0,
69: 70.0,
70: 8172.0,
71: 0.0,
72: 0.0,
73: 0.0,
74: 64902.0,
75: 0.0,
76: 347.0,
77: 0.0,
78: 36605.0,
79: 0.0,
80: 379.0,
81: 70.0,
82: 0.0,
83: 0.0,
84: 3001.0,
85: 0.0,
86: 1630.0,
87: 7.0,
88: 364.0,
89: 0.0,
90: 67404.0,
91: 9.0,
92: 0.0,
93: 0.0,
94: 7685.0,
95: 0.0,
96: 1017.0,
97: 0.0,
98: 2831.0,
99: 0.0,
100: 2963.0,
101: 0.0,
102: 854.0,
103: 0.0,
104: 0.0,
105: 0.0,
106: 0.0,
107: 0.0,
108: 0.0,
109: 0.0,
110: 0.0,
111: 0.0,
112: 0.0,
113: 0.0,
114: 0.0,
115: 0.0,
116: 0.0,
117: 0.0,
118: 0.0,
119: 0.0,
120: 0.0,
121: 0.0,
122: 0.0,
123: 0.0,
124: 0.0,
125: 0.0,
126: 67744.0,
127: 22.0,
128: 264.0,
129: 0.0,
260: 197.0,
268: 0.0,
265: 0.0,
269: 0.0,
261: 0.0,
266: 1198.0,
267: 0.0,
262: 2629.0,
258: 775.0,
257: 0.0,
263: 0.0,
259: 0.0,
264: 163.0,
250: 10326.0,
251: 0.0,
252: 1228.0,
253: 0.0,
254: 2769.0,
255: 0.0}
# smoke test for the repr
s = Series(ser)
result = s.value_counts()
str(result)
| bsd-3-clause |
pydata/xarray | xarray/tests/test_formatting_html.py | 1 | 6861 | from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core import formatting_html as fh
@pytest.fixture
def dataarray():
return xr.DataArray(np.random.RandomState(0).randn(4, 6))
@pytest.fixture
def dask_dataarray(dataarray):
pytest.importorskip("dask")
return dataarray.chunk()
@pytest.fixture
def multiindex():
mindex = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=("level_1", "level_2")
)
return xr.Dataset({}, {"x": mindex})
@pytest.fixture
def dataset():
times = pd.date_range("2000-01-01", "2001-12-31", name="time")
annual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))
base = 10 + 15 * annual_cycle.reshape(-1, 1)
tmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)
tmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)
return xr.Dataset(
{
"tmin": (("time", "location"), tmin_values),
"tmax": (("time", "location"), tmax_values),
},
{"time": times, "location": ["<IA>", "IN", "IL"]},
attrs={"description": "Test data."},
)
def test_short_data_repr_html(dataarray):
data_repr = fh.short_data_repr_html(dataarray)
assert data_repr.startswith("<pre>array")
def test_short_data_repr_html_non_str_keys(dataset):
ds = dataset.assign({2: lambda x: x["tmin"]})
fh.dataset_repr(ds)
def test_short_data_repr_html_dask(dask_dataarray):
import dask
if LooseVersion(dask.__version__) < "2.0.0":
assert not hasattr(dask_dataarray.data, "_repr_html_")
data_repr = fh.short_data_repr_html(dask_dataarray)
assert (
data_repr
== "dask.array<xarray-<this-array>, shape=(4, 6), dtype=float64, chunksize=(4, 6)>"
)
else:
assert hasattr(dask_dataarray.data, "_repr_html_")
data_repr = fh.short_data_repr_html(dask_dataarray)
assert data_repr == dask_dataarray.data._repr_html_()
def test_format_dims_no_dims():
dims, coord_names = {}, []
formatted = fh.format_dims(dims, coord_names)
assert formatted == ""
def test_format_dims_unsafe_dim_name():
dims, coord_names = {"<x>": 3, "y": 2}, []
formatted = fh.format_dims(dims, coord_names)
assert "<x>" in formatted
def test_format_dims_non_index():
dims, coord_names = {"x": 3, "y": 2}, ["time"]
formatted = fh.format_dims(dims, coord_names)
assert "class='xr-has-index'" not in formatted
def test_format_dims_index():
dims, coord_names = {"x": 3, "y": 2}, ["x"]
formatted = fh.format_dims(dims, coord_names)
assert "class='xr-has-index'" in formatted
def test_summarize_attrs_with_unsafe_attr_name_and_value():
attrs = {"<x>": 3, "y": "<pd.DataFrame>"}
formatted = fh.summarize_attrs(attrs)
assert "<dt><span><x> :</span></dt>" in formatted
assert "<dt><span>y :</span></dt>" in formatted
assert "<dd>3</dd>" in formatted
assert "<dd><pd.DataFrame></dd>" in formatted
def test_repr_of_dataarray(dataarray):
formatted = fh.array_repr(dataarray)
assert "dim_0" in formatted
# has an expanded data section
assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 1
# coords and attrs don't have any items so they'll be disabled and collapsed
assert (
formatted.count("class='xr-section-summary-in' type='checkbox' disabled >") == 2
)
with xr.set_options(display_expand_data=False):
formatted = fh.array_repr(dataarray)
assert "dim_0" in formatted
# has an expanded data section
assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 0
# coords and attrs don't have any items so they'll be disabled and collapsed
assert (
formatted.count("class='xr-section-summary-in' type='checkbox' disabled >")
== 2
)
def test_summary_of_multiindex_coord(multiindex):
idx = multiindex.x.variable.to_index_variable()
formatted = fh._summarize_coord_multiindex("foo", idx)
assert "(level_1, level_2)" in formatted
assert "MultiIndex" in formatted
assert "<span class='xr-has-index'>foo</span>" in formatted
def test_repr_of_multiindex(multiindex):
formatted = fh.dataset_repr(multiindex)
assert "(x)" in formatted
def test_repr_of_dataset(dataset):
formatted = fh.dataset_repr(dataset)
# coords, attrs, and data_vars are expanded
assert (
formatted.count("class='xr-section-summary-in' type='checkbox' checked>") == 3
)
assert "<U4" in formatted or ">U4" in formatted
assert "<IA>" in formatted
with xr.set_options(
display_expand_coords=False,
display_expand_data_vars=False,
display_expand_attrs=False,
):
formatted = fh.dataset_repr(dataset)
# coords, attrs, and data_vars are collapsed
assert (
formatted.count("class='xr-section-summary-in' type='checkbox' checked>")
== 0
)
assert "<U4" in formatted or ">U4" in formatted
assert "<IA>" in formatted
def test_repr_text_fallback(dataset):
formatted = fh.dataset_repr(dataset)
# Just test that the "pre" block used for fallback to plain text is present.
assert "<pre class='xr-text-repr-fallback'>" in formatted
def test_variable_repr_html():
v = xr.Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
assert hasattr(v, "_repr_html_")
with xr.set_options(display_style="html"):
html = v._repr_html_().strip()
# We don't do a complete string identity since
# html output is probably subject to change, is long and... reasons.
# Just test that something reasonable was produced.
assert html.startswith("<div") and html.endswith("</div>")
assert "xarray.Variable" in html
def test_repr_of_nonstr_dataset(dataset):
ds = dataset.copy()
ds.attrs[1] = "Test value"
ds[2] = ds["tmin"]
formatted = fh.dataset_repr(ds)
assert "<dt><span>1 :</span></dt><dd>Test value</dd>" in formatted
assert "<div class='xr-var-name'><span>2</span>" in formatted
def test_repr_of_nonstr_dataarray(dataarray):
da = dataarray.rename(dim_0=15)
da.attrs[1] = "value"
formatted = fh.array_repr(da)
assert "<dt><span>1 :</span></dt><dd>value</dd>" in formatted
assert "<li><span>15</span>: 4</li>" in formatted
def test_nonstr_variable_repr_html():
v = xr.Variable(["time", 10], [[1, 2, 3], [4, 5, 6]], {22: "bar"})
assert hasattr(v, "_repr_html_")
with xr.set_options(display_style="html"):
html = v._repr_html_().strip()
assert "<dt><span>22 :</span></dt><dd>bar</dd>" in html
assert "<li><span>10</span>: 3</li></ul>" in html
| apache-2.0 |
fengjiang96/tushare | tushare/stock/reference.py | 27 | 25190 | # -*- coding:utf-8 -*-
"""
Investment reference data interface
Created on 2015/03/21
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from __future__ import division
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
from tushare.util import dateu as dt
import pandas as pd
import time
import lxml.html
from lxml import etree
import re
import json
from pandas.compat import StringIO
from tushare.util import dateu as du
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def profit_data(year=2014, top=25,
retry_count=3, pause=0.001):
"""
Get dividend and distribution plan data
Parameters
--------
year: year of the distribution plan
top: number of latest records to return; defaults to the 25 most recently announced
retry_count : int, default 3
    number of retries when network problems occur
pause : int, default 0
    seconds to pause between repeated requests, to avoid issues caused by requesting too frequently
returns
-------
DataFrame
code: stock code
name: stock name
year: distribution year
report_date: announcement date
divi: cash dividend (per 10 shares)
shares: bonus and capitalization shares (per 10 shares)
"""
if top <= 25:
df, pages = _dist_cotent(year, 0, retry_count, pause)
return df.head(top)
elif top == 'all':
ct._write_head()
df, pages = _dist_cotent(year, 0, retry_count, pause)
for idx in range(1,int(pages)):
df = df.append(_dist_cotent(year, idx, retry_count,
pause), ignore_index=True)
return df
else:
if isinstance(top, int):
ct._write_head()
allPages = top/25+1 if top%25>0 else top/25
df, pages = _dist_cotent(year, 0, retry_count, pause)
if int(allPages) < int(pages):
pages = allPages
for idx in range(1, int(pages)):
df = df.append(_dist_cotent(year, idx, retry_count,
pause), ignore_index=True)
return df.head(top)
else:
print(ct.TOP_PARAS_MSG)
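# Illustrative usage sketch (added, not part of the original module; requires
# network access, so it is defined but never called here): fetch the 25 most
# recently announced distribution plans for 2014, or pass top='all' to page
# through every announcement of the year.
def _demo_profit_data():
    df = profit_data(year=2014, top=25)
    return df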
def _fun_divi(x):
if ct.PY3:
reg = re.compile(r'分红(.*?)元', re.UNICODE)
res = reg.findall(x)
return 0 if len(res)<1 else float(res[0])
else:
if isinstance(x, unicode):
s1 = unicode('分红','utf-8')
s2 = unicode('元','utf-8')
reg = re.compile(r'%s(.*?)%s'%(s1, s2), re.UNICODE)
res = reg.findall(x)
return 0 if len(res)<1 else float(res[0])
else:
return 0
def _fun_into(x):
if ct.PY3:
reg1 = re.compile(r'转增(.*?)股', re.UNICODE)
reg2 = re.compile(r'送股(.*?)股', re.UNICODE)
res1 = reg1.findall(x)
res2 = reg2.findall(x)
res1 = 0 if len(res1)<1 else float(res1[0])
res2 = 0 if len(res2)<1 else float(res2[0])
return res1 + res2
else:
if isinstance(x, unicode):
s1 = unicode('转增','utf-8')
s2 = unicode('送股','utf-8')
s3 = unicode('股','utf-8')
reg1 = re.compile(r'%s(.*?)%s'%(s1, s3), re.UNICODE)
reg2 = re.compile(r'%s(.*?)%s'%(s2, s3), re.UNICODE)
res1 = reg1.findall(x)
res2 = reg2.findall(x)
res1 = 0 if len(res1)<1 else float(res1[0])
res2 = 0 if len(res2)<1 else float(res2[0])
return res1 + res2
else:
return 0
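# Illustrative sketch (added, not part of the original module): the two helpers
# above extract the cash dividend and the combined bonus/transfer share counts
# from a plan string (per 10 shares). The plan text below is made up but
# follows the wording the regexes expect; the expected result is (3.0, 7.0).
def _demo_parse_plan():
    plan = u'10送股2股转增5股分红3元'
    return _fun_divi(plan), _fun_into(plan)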
def _dist_cotent(year, pageNo, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
if pageNo > 0:
ct._write_console()
html = lxml.html.parse(rv.DP_163_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
ct.PAGES['163dp'], year, pageNo))
res = html.xpath('//div[@class=\"fn_rp_list\"]/table')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows=[0])[0]
df = df.drop(df.columns[0], axis=1)
df.columns = rv.DP_163_COLS
df['divi'] = df['plan'].map(_fun_divi)
df['shares'] = df['plan'].map(_fun_into)
df = df.drop('plan', axis=1)
df['code'] = df['code'].astype(object)
df['code'] = df['code'].map(lambda x : str(x).zfill(6))
pages = []
if pageNo == 0:
page = html.xpath('//div[@class=\"mod_pages\"]/a')
if len(page)>1:
asr = page[len(page)-2]
pages = asr.xpath('text()')
except Exception as e:
print(e)
else:
if pageNo == 0:
return df, pages[0] if len(pages)>0 else 0
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def forecast_data(year, quarter):
"""
Get earnings forecast (pre-announcement) data
Parameters
--------
year: int, year, e.g. 2014
quarter: int, quarter: only 1, 2, 3 or 4 are accepted
Note: the data is scraped from the website page by page, so speed depends on your current network connection
Return
--------
DataFrame
code, stock code
name, stock name
type, type of forecast change (e.g. expected increase, expected loss)
report_date, release date
pre_eps, EPS for the same period last year
range, range of the forecast change
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_forecast_data(year, quarter, 1, pd.DataFrame())
df = pd.DataFrame(data, columns=ct.FORECAST_COLS)
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
return df
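# Illustrative usage sketch (added, not part of the original module; requires
# network access, so it is defined but never called here): earnings forecasts
# for Q1 2014, filtered to the "expected increase" type as a hypothetical example.
def _demo_forecast_data():
    df = forecast_data(2014, 1)
    return df[df['type'] == u'预增']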
def _get_forecast_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
html = lxml.html.parse(ct.FORECAST_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year, quarter, pageNo,
ct.PAGE_NUM[1]))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = sarr.replace('--', '0')
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop([4, 5, 8], axis=1)
df.columns = ct.FORECAST_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+',nextPage[0])[0]
return _get_forecast_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def xsg_data(year=None, month=None,
retry_count=3, pause=0.001):
"""
Get data on restricted shares being released from lock-up
Parameters
--------
year: year, defaults to the current year
month: release month, defaults to the current month
retry_count : int, default 3
    number of retries when network problems occur
pause : int, default 0
    seconds to pause between repeated requests, to avoid issues caused by requesting too frequently
Return
------
DataFrame
code: stock code
name: stock name
date: release (unlock) date
count: number of shares released (in 10,000 shares)
ratio: ratio to total share capital
"""
year = dt.get_year() if year is None else year
month = dt.get_month() if month is None else month
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
ct.PAGES['emxsg'], year, month))
lines = urlopen(request, timeout = 10).read()
lines = lines.decode('utf-8') if ct.PY3 else lines
except Exception as e:
print(e)
else:
da = lines[3:len(lines)-3]
list = []
for row in da.split('","'):
list.append([data for data in row.split(',')])
df = pd.DataFrame(list)
df = df[[1, 3, 4, 5, 6]]
for col in [5, 6]:
df[col] = df[col].astype(float)
df[5] = df[5]/10000
df[6] = df[6]*100
df[5] = df[5].map(ct.FORMAT)
df[6] = df[6].map(ct.FORMAT)
df.columns = rv.XSG_COLS
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
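# Illustrative usage sketch (added, not part of the original module; requires
# network access, so it is defined but never called here): lock-up expirations
# for a hypothetical month.
def _demo_xsg_data():
    return xsg_data(year=2015, month=6)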
def fund_holdings(year, quarter,
retry_count=3, pause=0.001):
"""
Get fund holdings data
Parameters
--------
year: year, e.g. 2014
quarter: quarter (only 1, 2, 3 or 4 are accepted)
retry_count : int, default 3
    number of retries when network problems occur
pause : int, default 0
    seconds to pause between repeated requests, to avoid issues caused by requesting too frequently
Return
------
DataFrame
code: stock code
name: stock name
date: report date
nums: number of funds holding the stock
nlast: change versus the previous period (increase or decrease)
count: shares held by funds (in 10,000 shares)
clast: change versus the previous period
amount: market value of fund holdings
ratio: ratio to tradable (float) shares
"""
start,end = rv.QUARTS_DIC[str(quarter)]
if quarter == 1:
start = start % str(year-1)
end = end%year
else:
start, end = start%year, end%year
ct._write_head()
df, pages = _holding_cotent(start, end, 0, retry_count, pause)
for idx in range(1, pages):
df = df.append(_holding_cotent(start, end, idx, retry_count, pause),
ignore_index=True)
return df
def _holding_cotent(start, end, pageNo, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
if pageNo>0:
ct._write_console()
try:
request = Request(rv.FUND_HOLDS_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
ct.PAGES['163fh'], ct.PAGES['163fh'],
pageNo, start, end, _random(5)))
lines = urlopen(request, timeout = 10).read()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines.replace('--', '0')
lines = json.loads(lines)
data = lines['list']
df = pd.DataFrame(data)
df = df.drop(['CODE', 'ESYMBOL', 'EXCHANGE', 'NAME', 'RN', 'SHANGQIGUSHU',
'SHANGQISHIZHI', 'SHANGQISHULIANG'], axis=1)
for col in ['GUSHU', 'GUSHUBIJIAO', 'SHIZHI', 'SCSTC27']:
df[col] = df[col].astype(float)
df['SCSTC27'] = df['SCSTC27']*100
df['GUSHU'] = df['GUSHU']/10000
df['GUSHUBIJIAO'] = df['GUSHUBIJIAO']/10000
df['SHIZHI'] = df['SHIZHI']/10000
df['GUSHU'] = df['GUSHU'].map(ct.FORMAT)
df['GUSHUBIJIAO'] = df['GUSHUBIJIAO'].map(ct.FORMAT)
df['SHIZHI'] = df['SHIZHI'].map(ct.FORMAT)
df['SCSTC27'] = df['SCSTC27'].map(ct.FORMAT)
df.columns = rv.FUND_HOLDS_COLS
df = df[['code', 'name', 'date', 'nums', 'nlast', 'count',
'clast', 'amount', 'ratio']]
except Exception as e:
print(e)
else:
if pageNo == 0:
return df, int(lines['pagecount'])
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def new_stocks(retry_count=3, pause=0.001):
"""
Get new stock (IPO) listing data
Parameters
--------
retry_count : int, default 3
    number of retries when network problems occur
pause : int, default 0
    seconds to pause between repeated requests, to avoid issues caused by requesting too frequently
Return
------
DataFrame
code: stock code
name: stock name
ipo_date: online offering date
issue_date: listing date
amount: shares issued (in 10,000 shares)
markets: shares issued online (in 10,000 shares)
price: issue price (CNY)
pe: issue P/E ratio
limit: individual subscription limit (in 10,000 shares)
funds: funds raised (in 100 million CNY)
ballot: online lottery winning rate (%)
"""
data = pd.DataFrame()
ct._write_head()
df = _newstocks(data, 1, retry_count,
pause)
return df
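# Illustrative usage sketch (added, not part of the original module; requires
# network access, so it is defined but never called here): recent IPO listings
# and, as a hypothetical aggregation, the total funds raised (100 million CNY).
def _demo_new_stocks():
    df = new_stocks()
    total_funds = df['funds'].astype(float).sum()
    return df, total_funds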
def _newstocks(data, pageNo, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
html = lxml.html.parse(rv.NEW_STOCKS_URL%(ct.P_TYPE['http'],ct.DOMAINS['vsf'],
ct.PAGES['newstock'], pageNo))
res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = sarr.replace('<font color="red">*</font>', '')
sarr = '<table>%s</table>'%sarr
df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
df = df.drop([df.columns[idx] for idx in [1, 12, 13, 14]], axis=1)
df.columns = rv.NEW_STOCKS_COLS
df['code'] = df['code'].map(lambda x : str(x).zfill(6))
res = html.xpath('//table[@class=\"table2\"]/tr[1]/td[1]/a/text()')
tag = '下一页' if ct.PY3 else unicode('下一页', 'utf-8')
hasNext = True if tag in res else False
data = data.append(df, ignore_index=True)
pageNo += 1
if hasNext:
data = _newstocks(data, pageNo, retry_count, pause)
except Exception as ex:
print(ex)
else:
return data
def sh_margins(start=None, end=None, retry_count=3, pause=0.001):
"""
Get the Shanghai market margin trading (financing and securities lending) summary
Parameters
--------
start: string
    start date, format YYYY-MM-DD; defaults to one year ago today when empty
end: string
    end date, format YYYY-MM-DD; defaults to the current date when empty
retry_count : int, default 3
    number of retries when network problems occur
pause : int, default 0
    seconds to pause between repeated requests, to avoid issues caused by requesting too frequently
Return
------
DataFrame
opDate: margin trading date
rzye: financing balance of the day (CNY)
rzmre: financing purchases of the day (CNY)
rqyl: securities lending volume outstanding of the day
rqylje: value of securities lending outstanding of the day (CNY)
rqmcl: securities sold short during the day
rzrqjyzl: total margin (financing and securities lending) balance of the day (CNY)
"""
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
if du.diff_day(start, end) < 0:
return None
start, end = start.replace('-', ''), end.replace('-', '')
data = pd.DataFrame()
ct._write_head()
df = _sh_hz(data, start=start, end=end,
retry_count=retry_count,
pause=pause)
return df
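# Illustrative usage sketch (added, not part of the original module; requires
# network access, so it is defined but never called here): Shanghai margin
# summary over a hypothetical date window, indexed by trading date.
def _demo_sh_margins():
    df = sh_margins(start='2015-01-01', end='2015-04-19')
    return None if df is None else df.set_index('opDate')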
def _sh_hz(data, start=None, end=None,
pageNo='', beginPage='',
endPage='',
retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
tail = rv.MAR_SH_HZ_TAIL_URL%(pageNo,
beginPage, endPage)
if pageNo == '':
pageNo = 6
tail = ''
else:
pageNo += 5
beginPage = pageNo
endPage = pageNo + 4
url = rv.MAR_SH_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['qmd'], _random(5),
start, end, tail,
_random())
ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(url, ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
pagecount = int(lines['pageHelp'].get('pageCount'))
datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_HZ_COLS)
df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
data = data.append(df, ignore_index=True)
if beginPage < datapage*5:
data = _sh_hz(data, start=start, end=end, pageNo=pageNo,
beginPage=beginPage, endPage=endPage,
retry_count=retry_count, pause=pause)
except Exception as e:
print(e)
else:
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sh_margin_details(date='', symbol='',
start='', end='',
retry_count=3, pause=0.001):
"""
    Get the detailed Shanghai margin trading list (per security)
    Parameters
    --------
    date:string
        date of the detail data, format: YYYY-MM-DD, defaults to ''
    symbol:string
        code of the underlying security, 6 digits e.g. 600848, defaults to ''
    start:string
        start date, format: YYYY-MM-DD, defaults to ''
    end:string
        end date, format: YYYY-MM-DD, defaults to ''
    retry_count : int, default 3
        number of times to retry when network or similar problems occur
    pause : int, default 0
        seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ------
    DataFrame
    opDate: margin trading date
    stockCode: code of the underlying security
    securityAbbr: short name of the underlying security
    rzye: financing balance for the day (CNY)
    rzmre: financing purchase amount for the day (CNY)
    rzche: financing repayment amount for the day (CNY)
    rqyl: securities lending balance for the day
    rqmcl: securities lending sell volume for the day
    rqchl: securities lending repayment volume for the day
"""
date = date if date == '' else date.replace('-', '')
start = start if start == '' else start.replace('-', '')
end = end if end == '' else end.replace('-', '')
if (start != '') & (end != ''):
date = ''
data = pd.DataFrame()
ct._write_head()
df = _sh_mx(data, date=date, start=start,
end=end, symbol=symbol,
retry_count=retry_count,
pause=pause)
return df
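# Hypothetical usage sketch (added for illustration, not part of the original module):
# per-security Shanghai margin detail for one symbol over a short date range; the
# symbol and the selected columns come from the sh_margin_details() docstring above.
def _example_sh_margin_details_usage():
    df = sh_margin_details(symbol='600848', start='2015-01-05', end='2015-01-09')
    print(df[['opDate', 'stockCode', 'rzye', 'rqyl']].head())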
def _sh_mx(data, date='', start='', end='',
symbol='',
pageNo='', beginPage='',
endPage='',
retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
tail = '&pageHelp.pageNo=%s&pageHelp.beginPage=%s&pageHelp.endPage=%s'%(pageNo,
beginPage, endPage)
if pageNo == '':
pageNo = 6
tail = ''
else:
pageNo += 5
beginPage = pageNo
endPage = pageNo + 4
ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
clt = Client(rv.MAR_SH_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
ct.PAGES['qmd'], _random(5), date,
symbol, start, end, tail,
_random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
lines = clt.gvalue()
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines[19:-1]
lines = json.loads(lines)
pagecount = int(lines['pageHelp'].get('pageCount'))
datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
if pagecount == 0:
return data
if pageNo == 6:
ct._write_tips(lines['pageHelp'].get('total'))
df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_MX_COLS)
df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
data = data.append(df, ignore_index=True)
if beginPage < datapage*5:
data = _sh_mx(data, start=start, end=end, pageNo=pageNo,
beginPage=beginPage, endPage=endPage,
retry_count=retry_count, pause=pause)
except Exception as e:
print(e)
else:
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margins(start=None, end=None, retry_count=3, pause=0.001):
"""
    Get the list of Shenzhen margin trading totals
    Parameters
    --------
    start:string
        start date, format: YYYY-MM-DD; defaults to this day one week ago
    end:string
        end date, format: YYYY-MM-DD; defaults to today
    retry_count : int, default 3
        number of times to retry when network or similar problems occur
    pause : int, default 0
        seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ------
    DataFrame
    opDate: margin trading date (index)
    rzmre: financing purchase amount (CNY)
    rzye: financing balance (CNY)
    rqmcl: securities lending sell volume
    rqyl: securities lending balance
    rqye: value of the securities lending balance (CNY)
    rzrqye: total margin trading balance (CNY)
"""
data = pd.DataFrame()
if start is None and end is None:
end = du.today()
start = du.day_last_week()
if start is None or end is None:
ct._write_msg(rv.MAR_SZ_HZ_MSG2)
return None
try:
date_range = pd.date_range(start=start, end=end, freq='B')
if len(date_range)>261:
ct._write_msg(rv.MAR_SZ_HZ_MSG)
else:
ct._write_head()
for date in date_range:
data = data.append(_sz_hz(str(date.date()), retry_count, pause) )
except:
ct._write_msg(ct.DATA_INPUT_ERROR_MSG)
else:
return data
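# Hypothetical usage sketch (added for illustration, not part of the original module):
# Shenzhen margin totals for the default window (the last week). sz_margins() can
# return None on invalid input, so the result is checked before printing.
def _example_sz_margins_usage():
    df = sz_margins()
    if df is not None:
        print(df.tail())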
def _sz_hz(date='', retry_count=3, pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
ct._write_console()
try:
request = Request(rv.MAR_SZ_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
ct.PAGES['szsefc'], date))
lines = urlopen(request, timeout = 10).read()
if len(lines) <= 200:
return pd.DataFrame()
df = pd.read_html(lines, skiprows=[0])[0]
df.columns = rv.MAR_SZ_HZ_COLS
df['opDate'] = date
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margin_details(date='', retry_count=3, pause=0.001):
"""
    Get the detailed Shenzhen margin trading list (per security)
    Parameters
    --------
    date:string
        date of the detail data, format: YYYY-MM-DD, defaults to ''
    retry_count : int, default 3
        number of times to retry when network or similar problems occur
    pause : int, default 0
        seconds to pause between repeated requests, to avoid problems caused by requesting too frequently
    Return
    ------
    DataFrame
    opDate: margin trading date
    stockCode: code of the underlying security
    securityAbbr: short name of the underlying security
    rzmre: financing purchase amount (CNY)
    rzye: financing balance (CNY)
    rqmcl: securities lending sell volume
    rqyl: securities lending balance
    rqye: value of the securities lending balance (CNY)
    rzrqye: total margin trading balance (CNY)
"""
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.MAR_SZ_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
ct.PAGES['szsefc'], date))
lines = urlopen(request, timeout = 10).read()
if len(lines) <= 200:
return pd.DataFrame()
df = pd.read_html(lines, skiprows=[0])[0]
df.columns = rv.MAR_SZ_MX_COLS
df['stockCode'] = df['stockCode'].map(lambda x:str(x).zfill(6))
df['opDate'] = date
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
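# Hypothetical usage sketch (added for illustration, not part of the original module):
# one trading day of per-security Shenzhen margin detail; the selected columns follow
# the sz_margin_details() docstring above and the date is an arbitrary example.
def _example_sz_margin_details_usage():
    df = sz_margin_details(date='2015-01-09')
    print(df[['stockCode', 'securityAbbr', 'rzye', 'rqye']].head())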
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
| bsd-3-clause |
bearing/dosenet-raspberrypi | spectra_fitter.py | 2 | 24564 | import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.dates as mdates
from dateutil.parser import parse
from datetime import datetime
from datetime import timedelta
# import urllib.request
import pytz
import codecs
from matplotlib.backends.backend_pdf import PdfPages
from scipy import optimize
from scipy import asarray as ar,exp
from scipy.integrate import quad
import pandas as pd
from pandas import DataFrame
#--------------------------------------------------------------------------#
# Fit Functions
#--------------------------------------------------------------------------#
def lbound(bound,par):
return 1e4*np.sqrt(bound-par) + 1e-3*(bound-par) if (par<bound) else 0
def ubound(bound,par):
return 1e4*np.sqrt(par-bound) + 1e-3*(par-bound) if (par>bound) else 0
def bound(bounds,par):
return lbound(bounds[0],par) + ubound(bounds[1],par)
def fixed(fix,par):
return bound((fix,fix), par)
def gaus(x,a,x0,sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))+lbound(0,a)+lbound(0,sigma)+lbound(0,x0)
def expo(x,a,slope):
return a*exp(x*slope)
# p = [a1,mean,sigma,a2,slope]
def gaus_plus_exp(x,p):
return gaus(x,p[0],p[1],p[2])+expo(x,p[3],p[4])
# p = [a1,mean,sigma,slope,const]
def gaus_plus_line(x,p):
return gaus(x,p[0],p[1],p[2])+p[3]*x+p[4]
def gaus_plus_const(x,p):
return gaus(x,p[0],p[1],p[2])+p[3]
def double_gaus_plus_exp(x,p):
return gaus(x,p[0],p[1],p[2])+gaus(x,p[3],p[4],p[5])+expo(x,p[6],p[7])
def double_gaus_plus_line(x,p):
return gaus(x,p[0],p[1],p[2])+gaus(x,p[3],p[4],p[5])+p[6]*x+p[7]
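# Illustration (added, not part of the original module): how the composite
# gaussian + exponential model above is evaluated on an array of channels.
# The parameter values are made up purely for demonstration.
def _example_fit_model():
    x = np.arange(400, 700)
    # p = [gaussian amp, mean, sigma, exponential amp, exponential slope]
    p = [120.0, 550.0, 8.0, 30.0, -0.004]
    return x, gaus_plus_exp(x, p)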
#--------------------------------------------------------------------------#
# Process input data
#--------------------------------------------------------------------------#
def make_int(lst):
#Makes all entries of a list an integer
y = []
for i in lst:
y.append(int(i))
return y
def make_array(lst,low=10,high=1032):
'''
    Makes a list into an array and splices out the irrelevant parts of the spectrum.
    Set the lower and upper bounds of the required data for each isotope
    from the input CSV file.
'''
z = np.asarray(make_int(lst[low:high]))
return z
def get_times(rows, number, n=1):
'''
Get list of times for data: determines time as the midpoint between the upper and lower bounds in the integration window
Arguments:
- full list of inputs from data csv
- number of days to collect data over
- number of hours to integrate over
Returns:
- list of times
'''
entries = 12*n
days = (24/n)
i = 0
counter = 0
times = []
while i < number*days:
if counter < days:
time_range = []
integration = rows[(i*entries)+1:((i+1)*entries)+1]
for j in integration:
time_range.append(parse(j[1]))
times.append(time_range[int(len(time_range)/2)])
counter+=1
i+=1
else:
print('finished', i)
counter = 0
print('finished', i)
counter = 0
return times
def double_peak_finder(array,lower,upper):
'''
Fits double gaussian + exponential to data within some window
- fit is applied only to data within the upper/lower channel
boundaries provided as inputs
Arguments:
- full array of data
- lower and upper channel values for the fit window
Returns:
- list of fit parameters and list of parameter errors
'''
points = ar(range(lower,upper))
peak = list(array[lower:upper])
counts = ar(peak)
# Initialize fit parameters based on rough estimates of mean,sigma,amp,etc.
# - mean estimated as center of fit window - set window accordingly
# - double gaussian means shifted slightly in each direction
# - gaussian amp and expo shift estimated based on counts at left edge
# - expo slope determined using fit window boundaries
nentries = len(points)
mean = lower + (upper - lower)/2.0
slope = 2*(np.log(counts[-1])-np.log(counts[0]))/(points[-1]-points[0])
pinit = [counts[0]/5.0,mean-2,5.0,counts[0]/5.0,mean+2,5.0,counts[0],slope]
# Currently using leastsq fit from scipy
# - see scipy documentation for more information
errfunc = lambda p, x, y: double_gaus_plus_exp(x,p) - y
pfit,pcov,infodict,errmsg,success = \
optimize.leastsq(errfunc, pinit, args=(points,counts), \
full_output=1, epsfcn=0.0001)
# Calculate fit parameter uncertainties using the covariance matrix
# and the (fit - data) variance
if (len(counts) > len(pinit)) and pcov is not None:
s_sq = (errfunc(pfit, points, counts)**2).sum()/(len(counts)-len(pinit))
pcov = pcov * s_sq
else:
pcov = 0
error = []
for i in range(len(pfit)):
try:
# This conditional is bad!!
# Artificially sets error to zero if it's too big - remove now!
if np.absolute(pcov[i][i])**0.5 > np.absolute(pfit[i]):
error.append( 0.00 )
else:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
pfit_leastsq = pfit
perr_leastsq = np.array(error)
return pfit_leastsq, perr_leastsq
def peak_finder(array,lower,upper,count_offset):
'''
Fits gaussian + exponential to data within some window
- fit is applied only to data within the upper/lower channel
boundaries provided as inputs
Arguments:
- full array of data
- lower and upper channel values for the fit window
- count_offset used to correct exponential fit parameter for the fact that the fit is not starting at the left edge of the spectrum
Returns:
- list of fit parameters and list of parameter errors
'''
points = ar(range(lower,upper))
peak = list(array[lower:upper])
counts = ar(peak)
# Initialize fit parameters based on rough estimates of mean,sigma,amp,etc.
# - mean estimated as center of fit window - set window accordingly
# - gaussian amp and expo shift estimated based on counts at left edge
# - expo slope determined using fit window boundaries
nentries = len(points)
mean = lower + (upper - lower)/2.0
slope = 2*(np.log(counts[-1])-np.log(counts[0]))/(points[-1]-points[0])
pinit = [counts[0],mean,5.0,counts[0]*count_offset,slope]
#print('Initial parameters: amp = {0}, mean = {1}, sigma = {2}, amp2 = {3}'.format(pinit[0],pinit[1],pinit[2],pinit[3]))
# Currently using leastsq fit from scipy
# - see scipy documentation for more information
errfunc = lambda p, x, y: gaus_plus_exp(x,p)-y
pfit,pcov,infodict,errmsg,success = \
optimize.leastsq(errfunc, pinit, args=(points,counts), \
full_output=1, epsfcn=0.0001)
#print('after parameters: amp= {0}, mean ={1}, sigma = {2}, amp2 = {3}'.format(pfit[0],pfit[1],pfit[2],pfit[3]))
# Calculate fit parameter uncertainties using the covariance matrix
# and the (fit - data) variance
if (len(counts) > len(pinit)) and pcov is not None:
s_sq = (errfunc(pfit, points, counts)**2).sum()/(len(counts)-len(pinit))
pcov = pcov * s_sq
else:
pcov = 0
error = []
for i in range(len(pfit)):
try:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
pfit_leastsq = pfit
perr_leastsq = np.array(error)
return pfit_leastsq, perr_leastsq
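# Hypothetical usage sketch (added, not part of the original module): fitting a single
# peak in an already-integrated spectrum. The 540-640 window matches the K-40 window
# used later in main(); `integrated` is assumed to be a 1-D array of channel counts.
def _example_peak_finder(integrated):
    pars, errs = peak_finder(integrated, 540, 640, count_offset=100)
    print('fitted peak channel = {} +/- {}'.format(pars[1], errs[1]))
    return pars, errs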
def get_double_peaks(rows, number, n=1, lower_limit=480, upper_limit=600, make_plot = False):
'''
Applies double gaussian + expo fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of days to run over
- number of hours to integrate each calculation over
- lower,upper limits for fit windows
- flag to plot each fit for diagnostics
Returns:
- list of means,sigmas,amps for second gaussian in fit
- that's the Bi peak, so this is hard coded to work for a specific case
- each entry in list includes the value and uncertainty
'''
entries = 12*n
days = (24/n)
i = 0
counter = 0
means = []
sigmas = []
amps = []
while i < number*days:
if counter < days:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
array_lst = []
for j in integration:
array_lst.append(make_array(j,12))
integrated = sum(array_lst)
#print integrated
fit_pars, fit_errs = double_peak_finder(integrated,lower_limit,upper_limit)
mean = [fit_pars[1],fit_errs[1]]
sigma = [fit_pars[2],fit_errs[2]]
amp = [fit_pars[0],fit_errs[0]]
if fit_pars[4] > fit_pars[1]:
mean = [fit_pars[4],fit_errs[4]]
sigma = [fit_pars[5],fit_errs[5]]
amp = [fit_pars[3],fit_errs[3]]
means.append(mean)
sigmas.append(sigma)
amps.append(amp)
counter+=1
i+=1
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(1,1000)
x = ar(range(0,len(integrated)))
plt.plot(x,integrated,'b:',label='data')
plt.plot(x,double_gaus_plus_exp(x,fit_pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
plt.show()
else:
counter = 0
counter = 0
return means, sigmas, amps
def get_peaks(rows, number=1, n=1, lower_limit=480, upper_limit=600, make_plot = False,count_offset=100):
'''
Applies double gaussian + expo fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of days to run over
- number of hours to integrate each calculation over
- lower,upper limits for fit windows
- flag to plot each fit for diagnostics
- count offset correction to fit parameters based on peak position
(peaks farther from the left edge of spectrum need bigger correction)
Returns:
- lists of means,sigmas,amps from all gaussian fits
- each entry in list includes the value and uncertainty
'''
entries = 12*n
days = (24/n)
print('making {} plots for each day'.format(days))
i = 0
counter = 0
means = []
sigmas = []
amps = []
while i < number*days:
if counter < days:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
array_lst = []
for j in integration:
array_lst.append(make_array(j,12))
integrated = sum(array_lst)
#print integrated
fit_pars,fit_errs = peak_finder(integrated,lower_limit,upper_limit,count_offset)
means.append([fit_pars[1],fit_errs[1]])
sigmas.append([fit_pars[2],fit_errs[2]])
amps.append([fit_pars[0],fit_errs[0]])
counter +=1
i+=1
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(1,500)
#plt.ylim()
x = ar(range(0,len(integrated)))
plt.plot(x,integrated,'b:',label='data')
plt.plot(x,gaus_plus_exp(x,fit_pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
plt.show()
else:
counter = 0
counter = 0
return means,sigmas,amps
def get_peaks2(rows, number=1, n=1, lower_limit=900, upper_limit=1020, make_plot = False,count_offset=100):
'''
This is for Tl-208
Applies gaussian + const fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of days to run over
- number of hours to integrate each calculation over
- lower,upper limits for fit windows
- flag to plot each fit for diagnostics
- count offset correction to fit parameters based on peak position
(peaks farther from the left edge of spectrum need bigger correction)
Returns:
- lists of means,sigmas,amps from all gaussian fits
- each entry in list includes the value and uncertainty
'''
entries = 12*n
days = (24/n)
print('making {} plots for each day'.format(days))
i = 0
counter = 0
means = []
sigmas = []
amps = []
while i < number*days:
if counter < days:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
array_lst = []
for j in integration:
array_lst.append(make_array(j,12))
integrated = sum(array_lst)
#print integrated
fit_pars,fit_errs = peak_finder(integrated,lower_limit,upper_limit,count_offset)
means.append([fit_pars[1],fit_errs[1]])
sigmas.append([fit_pars[2],fit_errs[2]])
amps.append([fit_pars[0],fit_errs[0]])
counter +=1
i+=1
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(1,1000)
#plt.ylim()
x = ar(range(0,len(integrated)))
plt.plot(x,integrated,'b:',label='data')
plt.plot(x,gaus_plus_const(x,fit_pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
plt.show()
else:
counter = 0
counter = 0
return means,sigmas,amps
#--------------------------------------------------------------------------#
# Methods for performing calculations on fit results
#--------------------------------------------------------------------------#
def get_mean(values):
'''
Calculate the mean and sigma for some input array of data
'''
mean = 0
var = 0
for i in range(len(values)):
if values[i] > 1:
mean += values[i]
mean = mean/len(values)
for i in range(len(values)):
if values[i] > 1:
var += (mean - values[i])**2
var = np.sqrt(var/len(values))
return mean, var
def get_peak_counts(means,sigmas,amps):
'''
Calculate the area under a gaussian curve (estimate of counts in that peak)
Arguments:
    - list of gaussian means
    - list of gaussian widths
- list of gaussian amplitudes
Returns:
- list of counts from resulting gaussian integrations
'''
counts = []
for i in range(len(means)):
count,err = quad(gaus,0,1000,args=(amps[i],means[i],sigmas[i]))
counts.append(count)
return counts
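# Illustration (added, not part of the original module): the integral evaluated
# numerically by get_peak_counts() is, for a peak well inside the 0-1000 range,
# close to the analytic gaussian area amp * sigma * sqrt(2*pi).
def _example_peak_area():
    amp, mean, sigma = 100.0, 500.0, 10.0
    numeric = get_peak_counts([mean], [sigma], [amp])[0]
    analytic = amp * sigma * np.sqrt(2.0 * np.pi)
    print(numeric, analytic)  # the two values should agree closely (~2506.6)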
def get_calibration(rows,ndays):
'''
Specific method for getting the data calibration assuming Bi-214 is part
of a double peak and fitting data integrated over a day not an hour
Returns a single calibration constant
'''
    Bi_peaks, Bi_sigmas, Bi_amps = get_double_peaks(rows,ndays,24,240,320,True)
    K_peaks, K_sigmas, K_amps = get_peaks(rows,ndays,24,440,640)
    Tl_peaks, Tl_sigmas, Tl_amps = get_peaks2(rows,ndays,24,900,1020)
    print(Bi_peaks)
    print(K_peaks)
    print(Tl_peaks)
    # each peak entry is [fitted value, uncertainty] - pass only the fitted means to get_mean
    Bi_mean, Bi_var = get_mean(np.asarray([p[0] for p in Bi_peaks]))
    K_mean, K_var = get_mean(np.asarray([p[0] for p in K_peaks]))
    Tl_mean, Tl_var = get_mean(np.asarray([p[0] for p in Tl_peaks]))
    print('bismuth peak channel = {}, potassium peak channel = {}, thallium peak channel = {}'.format(Bi_mean,K_mean,Tl_mean))
calibration_constant = (1460-609)/(K_mean - Bi_mean)
print('keV/channel = {}'.format(calibration_constant))
return calibration_constant
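# Worked example (added, with made-up fitted channels): the calibration constant is the
# energy spacing of the two reference lines (1460 keV K-40 and 609 keV Bi-214) divided
# by their channel separation, exactly as computed in get_calibration() above.
def _example_calibration_constant():
    K_mean, Bi_mean = 560.0, 245.0  # hypothetical fitted peak channels
    keV_per_channel = (1460 - 609) / (K_mean - Bi_mean)
    print('keV/channel = {}'.format(keV_per_channel))  # 851 / 315, roughly 2.7 for these numbers
    return keV_per_channel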
def spectrum_peaks_plotter(rows):
'''
    This method integrates the input data from the CSV file and makes a plot for each
    isotope peak, based on the channel numbers and the corresponding counts for each
    isotope.
'''
n=4
entries = 12*n
integration = rows[1:entries+1]
array_lst = []
for j in integration:
array_lst.append(make_array(j,160,320))
integrated = sum(array_lst)
Channels = range(0,len(integrated))
Counts = integrated
plt.plot(Channels, Counts)
plt.xlabel('Channels')
plt.ylabel('Counts')
plt.title('Bi-Peaks Identifier ')
plt.show()
integration_1 = rows[1:entries+1]
array_lst_1 = []
for i in integration_1:
array_lst_1.append(make_array(i,540,640))
integrated_1 = sum(array_lst_1)
Channels_1 = range(0,len(integrated_1))
Counts_1 = integrated_1
plt.plot(Channels_1, Counts_1)
plt.xlabel('Channels')
plt.ylabel('Counts')
plt.title('K-Peak Identifier')
plt.show()
integration_2 = rows[1:entries+1]
array_lst_2 = []
for j in integration_2:
array_lst_2.append(make_array(j,800,1022))
integrated_2 = sum(array_lst_2)
Channels_2 = range(0,len(integrated_2))
Counts_2 = integrated_2
plt.plot(Channels_2, Counts_2)
plt.xlabel('Channels')
plt.ylabel('Counts')
plt.title('Tl-Peak Identifier')
plt.show()
def main(rows, times):
# import data from weather station for all isotopes
date = []
cpm = []
cpm_error = []
line = 0
# #url = 'https://radwatch.berkeley.edu/sites/default/files/dosenet/lbl_outside_d3s.csv'
# url = 'https://radwatch.berkeley.edu/sites/default/files/dosenet/etch_roof_d3s.csv'
# print(url)
# response = urllib.request.urlopen(url)
# print(response)
# rows = []
# # Reading file in python 3
# reader = csv.reader(codecs.iterdecode(response, 'utf-8'))
#
# for row in reader:
# rows.append(row)
# if line > 0:
# date.append(parse(row[1]))
# cpm.append(float(row[3]))
# cpm_error.append(float(row[4]))
# line += 1
#print 'collected data between ', date[0], ' and ', date[-1]
# get_calibration(rows,5)
#---------------------------------------------------------------------#
# Get fit results for ndays integrating over nhours for each fit
#---------------------------------------------------------------------#
ndays = 7
nhours = 2
#times = get_times(rows,ndays,nhours)
K_peaks, K_sigmas, K_amps = get_peaks(rows,ndays,nhours,540,640)
Bi_peaks,Bi_sigmas,Bi_amps = get_double_peaks(rows,ndays,nhours,160,320)
Bi_peaks,Bi_sigmas,Bi_amps = get_peaks(rows,ndays,nhours,164,324,False,1)
Tl_peaks, Tl_sigmas, Tl_amps = get_peaks2(rows,ndays,nhours,900,1000)
#-------------------------------------------------------------------------#
# Break apart mean,sigma,amp values and uncertainties
#-------------------------------------------------------------------------#
K_ch = np.asarray([i[0] for i in K_peaks])
K_ch_errs = np.asarray([i[1] for i in K_peaks])
K_sig = [i[0] for i in K_sigmas]
K_A = [i[0] for i in K_amps]
Bi_ch = np.asarray([i[0] for i in Bi_peaks])
Bi_ch_errs = np.asarray([i[1] for i in Bi_peaks])
Bi_sig = [i[0] for i in Bi_sigmas]
Bi_A = [i[0] for i in Bi_amps]
Tl_ch = np.asarray([i[0] for i in Tl_peaks])
Tl_ch_errs = np.asarray([i[1] for i in Tl_peaks])
Tl_sig = [i[0] for i in Tl_sigmas]
Tl_A = [i[0] for i in Tl_amps]
K_ch_ave = np.mean(K_ch)
K_ch_var = np.sqrt(np.var(K_ch))
B_ch_ave = np.mean(Bi_ch)
B_ch_var = np.sqrt(np.var(Bi_ch))
Tl_ch_ave = np.mean(Tl_ch)
Tl_ch_var = np.sqrt(np.var(Tl_ch))
print('K-40 <channel> = {} +/- {}'.format(K_ch_ave,K_ch_var))
print('Bi-214 <channel> = {} +/- {}'.format(B_ch_ave,B_ch_var))
print('Tl-208 <channel> = {} +/- {}'.format(Tl_ch_ave,Tl_ch_var))
for i in range(len(K_ch)):
if abs(K_ch[i]-K_ch_ave) > 3*K_ch_var:
print('Bad K-40 fit: peak channel = {}'.format(K_ch[i]))
if abs(Bi_ch[i]-B_ch_ave) > 3*B_ch_var:
print('Bad Bi-214 fit: peak channel = {}'.format(Bi_ch[i]))
#-------------------------------------------------------------------------#
# Get arrays of counts inside K-40, Bi-214,and Tl-208 peaks using fit results
#-------------------------------------------------------------------------#
K_counts = get_peak_counts(K_ch,K_sig,K_A)
Bi_counts = get_peak_counts(Bi_ch,Bi_sig,Bi_A)
Tl_counts= get_peak_counts(Tl_ch,Tl_sig,Tl_A)
#-------------------------------------------------------------------------#
# Get array of calibration constants from resulting K-40 and Bi-214 means
#-------------------------------------------------------------------------#
calibs = (1460-609)/(K_ch - Bi_ch)
calib_err = (1460-609)/(K_ch - Bi_ch)**2 \
*np.sqrt(Bi_ch_errs**2 + K_ch_errs**2)
#-------------------------------------------------------------------------#
# Plots of everything we are interested in!
#-------------------------------------------------------------------------#
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('K-40 counts vs Time')
plt.xlabel('Time')
plt.ylabel('counts')
plt.ylim(0,1600)
ax.plot(times,K_counts, 'ro')
ax.errorbar(times,K_counts,yerr=np.sqrt(K_counts),fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('Bi-214 counts vs Time')
plt.xlabel('Time')
plt.ylabel('counts')
ax.plot(times,Bi_counts, 'ro')
ax.errorbar(times,Bi_counts,yerr=np.sqrt(Bi_counts),fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('1460 Center channel vs Time')
plt.xlabel('Time')
plt.ylabel('1460 center channel')
ax.plot(times,K_ch, 'ro')
ax.errorbar(times,K_ch,yerr=K_ch_errs,fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig,ax=plt.subplots()
fig.patch.set_facecolor('white')
plt.title('Tl-208 count vs Time')
plt.xlabel('Time')
plt.ylabel('counts')
plt.ylim(0,1000)
ax.plot(times,Tl_counts, 'ro')
ax.errorbar(times,Tl_counts,yerr=np.sqrt(Tl_counts),fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('609 Center channel vs Time')
plt.xlabel('Time')
plt.ylabel('609 center channel')
plt.ylim(B_ch_ave-10*B_ch_var,B_ch_ave+10*B_ch_var)
ax.plot(times,Bi_ch, 'ro')
ax.errorbar(times,Bi_ch,yerr=Bi_ch_errs,fmt='ro',ecolor='r')
fig.autofmt_xdate()
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('keV/channel vs Time')
plt.xlabel('Time')
plt.ylabel('keV/channel')
#plt.ylim(4.9,5.15)
plt.ylim(4.6,6.0)
ax.plot(times,calibs, 'bo')
ax.errorbar(times,calibs,yerr=calib_err,fmt='bo',ecolor='b')
fig.autofmt_xdate()
# Finally: interested in how much the count rates vary for the two isotopes
Bi_mean, Bi_var = get_mean(np.asarray(Bi_counts))
print('Bi-214 <N> = {} +/- {}'.format(Bi_mean,Bi_var))
K_mean, K_var = get_mean(np.asarray(K_counts))
print('K-40 <N> = {} +/- {}'.format(K_mean,K_var))
Tl_mean, Tl_var = get_mean(np.asarray(Tl_counts))
print('Tl-208 <N> = {} +/- {}'.format(Tl_mean,Tl_var))
#Plotting the the three Isotopes on same plot
fig=plt.figure()
#plt.plot_date(times,K_counts,'bo',label='k-40')
plt.errorbar(times,K_counts,yerr=np.sqrt(K_counts),fmt='bo',ecolor='b',label='K-40')
#plt.plot_date(times,Bi_counts,'ro',label='Bi-214')
plt.errorbar(times,Bi_counts,yerr=np.sqrt(Bi_counts),fmt='ro',ecolor='r',label='Bi-214')
#plt.plot_date(times,Tl_counts,'ko',label='Tl-208')
plt.errorbar(times,Tl_counts,yerr=np.sqrt(Tl_counts),fmt='ko',ecolor='y',label='Tl-208')
plt.ylim(0,1800)
plt.xlabel('Time')
plt.ylabel('counts')
plt.title('K-40,Bi-214,Tl-208 Counts vs Time')
#plt.legend(bbox_to_anchor=(1.2, 0.05))
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.02),
ncol=3, fancybox=True, shadow=False,numpoints=1)
fig.autofmt_xdate()
# Show all plots - add autosave?
peaksplot= spectrum_peaks_plotter(rows)
plt.show()
| mit |
DonghoChoi/ISB_Project | local/lab_task1_2_to_queries.py | 2 | 5660 | #!/usr/bin/python
# Author: Dongho Choi
import os.path
import datetime
import math
import time
import sys
import itertools
import pandas as pd
from urllib.parse import urlparse
import numpy as np
from math import log
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
from geopy.distance import vincenty
import sys
#sys.path.append("./configs/")
sys.path.append("/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/Code/local/configs/")
import server_config # (1) info2_server (2) exploration_db
def find_domain(current_domain,df_distinct_visit_list): # Return -1 when no domain_query existing, otherwise the location
for i in range(0, len(df_distinct_visit_list)):
if current_domain == df_distinct_visit_list.iloc[i]['domain']:
#print("if found same domain: i = {0}, domainID = {1}".format(i,df_distinct_visit_list.iloc[i]['domainID']))
return df_distinct_visit_list.iloc[i]['domainID']
return -1
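# Hypothetical usage sketch (added, not part of the original script): find_domain()
# expects a DataFrame with 'domain' and 'domainID' columns and returns -1 when the
# domain has not been seen before.
def _example_find_domain():
    visits = pd.DataFrame({'domain': ['nytimes.com', 'bbc.com'], 'domainID': [0, 1]})
    print(find_domain('bbc.com', visits))   # -> 1
    print(find_domain('cnn.com', visits))   # -> -1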
if __name__ == "__main__":
# READ DATA FROM SERVER
#read_Data_from_Server()
# Server connection
server = SSHTunnelForwarder(
(server_config.info2_server['host'], 22),
ssh_username=server_config.info2_server['user'],
ssh_password=server_config.info2_server['password'],
remote_bind_address=('127.0.0.1', 3306))
server.start()
connection = pymysql.connect(host='127.0.0.1',
port=server.local_bind_port,
user=server_config.exploration_db['user'],
password=server_config.exploration_db['password'],
db=server_config.exploration_db['database'])
connection.autocommit(True)
cursor = connection.cursor()
print("MySQL connection established.")
# Get pages_all table: importing all pages that participants visited.
df_queries = pd.read_sql("SELECT userID,stageID,questionID,query FROM queries WHERE (userID!=5001)", con=connection)
#df_pages_all = df_pages_all.rename(columns={'epochTime': 'localTimestamp'})
print("queries Table READ")
print("Length of queries is ",len(df_queries))
# Get the participants list from the table of 'final_participants'
df_participants = pd.read_sql('SELECT * FROM final_participants', con=connection)
print("Participants Table READ")
# READ AND FILL THE PARTICIPANTS LIST WITH COMBINATIONS
participants_list = df_participants['userID'].tolist()
num_participants = len(participants_list) # number of participants
print('number of participants:{0}'.format(num_participants))
# url visit list
for i in range(0, num_participants): # i - current userID
#for i in range(0,1):
current_userID = participants_list[i]
print("Current User:",current_userID)
df_user_queries = df_queries.loc[df_queries['userID']==current_userID] # select rows that contains this specific participant
df_task1_queries = df_user_queries.loc[df_user_queries['stageID']==31]
df_task2_queries = df_user_queries.loc[df_user_queries['stageID']==41]
###### TASK 1 #######
# number of questions that the person asked
num_questions = df_task1_queries['questionID'].max()
task1_query_list = []
for j in range(0, num_questions):
df_question_queries = df_task1_queries.loc[df_task1_queries['questionID']==j+1]
query_list = df_question_queries['query'].tolist()
query_set = set(query_list)
num_a = len(query_set)
# check if queries that issued for previous questions are included.
stacked_query_set = set(task1_query_list)
query_set = query_set - stacked_query_set
num_b = len(query_set)
repeated_query = num_a-num_b
if (repeated_query != 0):
print("--------------- Query used in previous questions: {0} times".format(repeated_query))
num_query_issued = len(query_list)-repeated_query
num_distinct_query_issued = len(query_set)
print("in question {0}: {1} queries issued, and {2} distinct queries".format(j+1,num_query_issued,num_distinct_query_issued))
sql = "INSERT INTO user_task1_queries (userID,questionID,query_issued,distinct_query) VALUES (" + str(current_userID) + "," + str(j+1) + "," + str(num_query_issued) + "," + str(num_distinct_query_issued) + ");"
print(sql)
cursor.execute(sql)
task1_query_list = list(set(task1_query_list + query_list))
###### TASK 2 ######
task2_query_list = df_task2_queries['query'].tolist()
task2_query_set = set(task2_query_list)
num_c = len(task2_query_set)
stacked_query_set = set(task1_query_list)
task2_query_set = task2_query_set - stacked_query_set
num_d = len(task2_query_set)
task2_repeated_query = num_c-num_d
if(task2_repeated_query != 0):
print("------------ Query in task1 is revisited")
num_task2_query_issued = len(task2_query_list) - task2_repeated_query
num_task2_distinct_query_issued = len(task2_query_set)
print("in task2: {0} queries issued, and {1} distinct queries".format(num_task2_query_issued,num_task2_distinct_query_issued))
sql = "INSERT INTO user_task2_queries (userID,query_issued,distinct_query_issued) VALUES (" + str(current_userID) + "," + str(num_task2_query_issued) + "," + str(num_task2_distinct_query_issued) + ");"
print(sql)
cursor.execute(sql)
server.stop()
print("End")
| gpl-3.0 |
mbednarski/Chiron | chiron/plotter.py | 1 | 4558 | import os
import datetime
import abc
import numpy as np
import matplotlib.pyplot as plt
import sys
import logging
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
class SeriesTransformer(abc.ABC):
@abc.abstractmethod
def transform(self, data):
pass
class SeriesBase(abc.ABC):
def __init__(self, name, friendly_name=None):
self.name = name
self._location = None
self.friendly_name = friendly_name if friendly_name is not None else name
def set_location(self, location):
self._location = location
def _read_values(self):
files = [os.path.join(self._location, x) for x in os.listdir(self._location)]
files.sort()
values = None
for f in files:
fcontent = np.load(f)
if values is None:
values = fcontent
continue
values = np.vstack((values, fcontent))
return values
@abc.abstractmethod
def _get_data(self):
pass
def plot(self, axis, **kwargs):
data = self._get_data()
axis.plot(data, label=self.friendly_name, **kwargs)
axis.legend()
class RawSeries(SeriesBase):
def _get_data(self):
return self._read_values()
def __init__(self, name, friendly_name=None):
super().__init__(name, friendly_name)
class RollingAverageSeries(SeriesBase):
def __init__(self, name, friendly_name=None, window=50):
super().__init__(name, friendly_name)
self.window = window
def _get_data(self):
data = self._read_values()
data = self._compute_rolling_average(data, self.window)
return data
def _compute_rolling_average(self, array, window):
avgs = np.zeros_like(array)
for i in range(avgs.shape[0]):
avgs[i] = np.mean(array[i - window:i])
return avgs
class MaxSeries(SeriesBase):
def _get_data(self):
data = self._read_values()
data = self._compute_max(data)
return data
def __init__(self, name, friendly_name):
super().__init__(name, friendly_name)
def _compute_max(self, data):
maxs = np.zeros_like(data)
for i in range(maxs.shape[0]):
maxs[i] = np.max(data[:i + 1])
return maxs
class Plotter(object):
def __init__(self, root_directory, shape=(1, 1)):
logger.info("")
self._root_directory = root_directory
self._session_location = None
self._session_datetime = None
self._series_to_plot = []
self._series_locations = {}
self.fig, self.axes = plt.subplots(shape[0], shape[1])
def select_session(self):
raise NotImplementedError()
def select_latest_session(self):
ls = [(x, os.path.join(self._root_directory, x)) for x in os.listdir(self._root_directory)]
ls = filter(lambda x: os.path.isdir(x[1]), ls)
parsed = [
(x[0], x[1],
datetime.datetime.strptime(x[0], '%Y-%m-%d_%H_%M_%S'))
for x
in ls
]
parsed.sort(key=lambda x: x[2])
latest = parsed[-1]
self._session_location = latest[1]
self._session_datetime = latest[2]
self._series_locations = self._get_collections(self._session_location)
def _get_collections(self, root):
ls = [(x, os.path.join(root, x)) for x in os.listdir(root)]
ls = filter(lambda x: os.path.isdir(x[1]), ls)
return dict(list(ls))
def append(self, series, axis=1):
series.set_location(self._series_locations[series.name])
self._series_to_plot.append((series, axis))
def plot(self):
for s, axis in self._series_to_plot:
s.plot(self.axes[axis])
plt.show()
def main():
root_dir = r'C:\p\github\Chiron\chiron\agents\monitor'
p = Plotter(root_dir, shape=(2, 1))
p.select_latest_session()
p.append(RollingAverageSeries('episode_reward', '10 avg reward', 10), 0)
p.append(RollingAverageSeries('episode_reward', '50 avg reward', 50), 0)
p.append(RollingAverageSeries('episode_reward', '100 avg reward', 100), 0)
p.append(RollingAverageSeries('episode_reward', '200 avg reward', 200), 0)
p.append(MaxSeries('episode_reward', 'max reward'), 0)
p.append(RawSeries('epsilon', 'epsilon'), 1)
p.plot()
if __name__ == '__main__':
main()
| gpl-3.0 |
sonnyhu/scikit-learn | sklearn/utils/metaestimators.py | 11 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# License: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
amolkahat/pandas | pandas/tests/indexing/test_indexing.py | 2 | 40445 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
""" test fancy indexing & misc """
import pytest
import weakref
from warnings import catch_warnings, simplefilter
from datetime import datetime
from pandas.core.dtypes.common import (
is_integer_dtype,
is_float_dtype)
from pandas.compat import range, lrange, lzip, StringIO
import numpy as np
import pandas as pd
from pandas.core.indexing import (_non_reducing_slice, _maybe_numeric_slice,
validate_indices)
from pandas import NaT, DataFrame, Index, Series, MultiIndex
import pandas.util.testing as tm
from pandas.compat import PY2
from pandas.tests.indexing.common import Base, _mklbl
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy(Base):
""" pure get/set item & fancy indexing """
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
pytest.raises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
pytest.raises(ValueError, f)
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = pd.Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
# Test with np.inf in columns
df = DataFrame()
df.loc[0, 0] = 1
df.loc[1, 1] = 2
df.loc[0, np.inf] = 3
result = df.columns
expected = pd.Float64Index([0, 1, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
assert df['c'].dtype == np.float64
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left['foo'])
assert is_integer_dtype(left['baz'])
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
assert is_float_dtype(left['foo'])
assert is_float_dtype(left['baz'])
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
tm.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD'))
with pytest.raises(KeyError):
dfnu.loc[['E']]
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(PY2,
reason="GH-20770. Py2 unreliable warnings catching.")
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.loc[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.loc[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.loc[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.loc[idx, 'test'] = temp
assert df.iloc[0, 2] == '-----'
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ['col1', 'col2']
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': [0., 1., 4., 6., 8., 10.],
'col2': [12, 7, 16, np.nan, 20, 22]})
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
simplefilter("ignore")
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
simplefilter("ignore")
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
simplefilter("ignore")
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
simplefilter("ignore")
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
with catch_warnings(record=True):
simplefilter("ignore")
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = DataFrame([1], Index([pd.Timestamp('2011-01-01')], dtype=object))
assert df.index.is_all_dates
with pytest.raises(KeyError):
df['2011']
with pytest.raises(KeyError):
df.loc['2011', 0]
df = DataFrame()
assert not df.index.is_all_dates
with pytest.raises(KeyError):
df['2011']
with pytest.raises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
tm.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
@pytest.mark.parametrize("index,val", [
(Index([0, 1, 2]), 2),
(Index([0, 1, '2']), '2'),
(Index([0, 1, 2, np.inf, 4]), 4),
(Index([0, 1, 2, np.nan, 4]), 4),
(Index([0, 1, 2, np.inf]), np.inf),
(Index([0, 1, 2, np.nan]), np.nan),
])
def test_index_contains(self, index, val):
assert val in index
@pytest.mark.parametrize("index,val", [
(Index([0, 1, 2]), '2'),
(Index([0, 1, '2']), 2),
(Index([0, 1, 2, np.inf]), 4),
(Index([0, 1, 2, np.nan]), 4),
(Index([0, 1, 2, np.inf]), np.nan),
(Index([0, 1, 2, np.nan]), np.inf),
# Checking if np.inf in Int64Index should not cause an OverflowError
# Related to GH 16957
(pd.Int64Index([0, 1, 2]), np.inf),
(pd.Int64Index([0, 1, 2]), np.nan),
(pd.UInt64Index([0, 1, 2]), np.inf),
(pd.UInt64Index([0, 1, 2]), np.nan),
])
def test_index_not_contains(self, index, val):
assert val not in index
@pytest.mark.parametrize("index,val", [
(Index([0, 1, '2']), 0),
(Index([0, 1, '2']), '2'),
])
def test_mixed_index_contains(self, index, val):
# GH 19860
assert val in index
@pytest.mark.parametrize("index,val", [
(Index([0, 1, '2']), '1'),
(Index([0, 1, '2']), 2),
])
def test_mixed_index_not_contains(self, index, val):
# GH 19860
assert val not in index
def test_contains_with_float_index(self):
# GH#22085
integer_index = pd.Int64Index([0, 1, 2, 3])
uinteger_index = pd.UInt64Index([0, 1, 2, 3])
float_index = pd.Float64Index([0.1, 1.1, 2.2, 3.3])
for index in (integer_index, uinteger_index):
assert 1.1 not in index
assert 1.0 in index
assert 1 in index
assert 1.1 in float_index
assert 1.0 not in float_index
assert 1 not in float_index
def test_index_type_coercion(self):
with catch_warnings(record=True):
simplefilter("ignore")
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but is actually, not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.))]:
assert s.index.is_floating()
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
assert s2.index.is_floating()
assert idxr(s2)[0.1] == 0
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
assert s2.index.is_object()
class TestMisc(Base):
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
s = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
assert s.at[el] == item
for i in range(len(s)):
assert s.iat[i] == i + 1
def test_mixed_index_assignment(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2])
s.at['a'] = 11
assert s.iat[0] == 11
s.at[1] = 22
assert s.iat[3] == 22
def test_mixed_index_no_fallback(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2])
with pytest.raises(KeyError):
s.at[0]
with pytest.raises(KeyError):
s.at[4]
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
# label, index, slice
lbl_one, idx_one, slice_one = list('bcd'), [1, 2, 3], slice(1, 4)
lbl_two, idx_two, slice_two = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
with catch_warnings(record=True):
# XXX: finer-filter here.
simplefilter("ignore")
left.ix[slice_one, slice_two] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
with catch_warnings(record=True):
simplefilter("ignore")
left.ix[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
with catch_warnings(record=True):
simplefilter("ignore")
left.ix[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = DataFrame(xs, columns=cols, index=list('abcde'))
        # right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
            if not idx.is_integer():
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: s[::0])
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
with catch_warnings(record=True):
simplefilter("ignore")
tm.assert_raises_regex(ValueError,
'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
with catch_warnings(record=True):
simplefilter("ignore")
df2 = df.ix[[], :]
assert df2.loc[:, 'a'].dtype == np.int64
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
with catch_warnings(record=True):
simplefilter("ignore")
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
df = DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
assert isinstance(df.loc[tslice_], DataFrame)
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], Series(['A']), np.array(['A'])]
df = DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
df = DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
assert result == expected
        result = _maybe_numeric_slice(df, None, include_bool=True)
        expected = pd.IndexSlice[:, ['A', 'C']]
        assert result[0] == expected[0]
        assert list(result[1]) == list(expected[1])
result = _maybe_numeric_slice(df, [1])
expected = [1]
assert result == expected
def test_partial_boolean_frame_indexing(self):
# GH 17170
df = DataFrame(np.arange(9.).reshape(3, 3),
index=list('abc'), columns=list('ABC'))
index_df = DataFrame(1, index=list('ab'), columns=list('AB'))
result = df[index_df.notnull()]
expected = DataFrame(np.array([[0., 1., np.nan],
[3., 4., np.nan],
[np.nan] * 3]),
index=list('abc'),
columns=list('ABC'))
tm.assert_frame_equal(result, expected)
def test_no_reference_cycle(self):
df = DataFrame({'a': [0, 1], 'b': [2, 3]})
for name in ('loc', 'iloc', 'at', 'iat'):
getattr(df, name)
with catch_warnings(record=True):
simplefilter("ignore")
getattr(df, 'ix')
wr = weakref.ref(df)
del df
assert wr() is None
class TestSeriesNoneCoercion(object):
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_setitem_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
class TestDataframeNoneCoercion(object):
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_loc(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[0, ['foo']] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_coercion_with_setitem_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe[start_dataframe['foo'] == start_dataframe['foo'][
0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_loc_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[start_dataframe['foo'] == start_dataframe[
'foo'][0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)
def test_none_coercion_mixed_dtypes(self):
start_dataframe = DataFrame({
'a': [1, 2, 3],
'b': [1.0, 2.0, 3.0],
'c': [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1,
3)],
'd': ['a', 'b', 'c']
})
start_dataframe.iloc[0] = None
exp = DataFrame({'a': [np.nan, 2, 3],
'b': [np.nan, 2.0, 3.0],
'c': [NaT, datetime(2000, 1, 2),
datetime(2000, 1, 3)],
'd': [None, 'b', 'c']})
tm.assert_frame_equal(start_dataframe, exp)
def test_validate_indices_ok():
indices = np.asarray([0, 1])
validate_indices(indices, 2)
validate_indices(indices[:0], 0)
validate_indices(np.array([-1, -1]), 0)
def test_validate_indices_low():
indices = np.asarray([0, -2])
with tm.assert_raises_regex(ValueError, "'indices' contains"):
validate_indices(indices, 2)
def test_validate_indices_high():
indices = np.asarray([0, 1, 2])
with tm.assert_raises_regex(IndexError, "indices are out"):
validate_indices(indices, 2)
def test_validate_indices_empty():
with tm.assert_raises_regex(IndexError, "indices are out"):
validate_indices(np.array([0, 1]), 0)
def test_extension_array_cross_section():
# A cross-section of a homogeneous EA should be an EA
df = pd.DataFrame({
"A": pd.core.arrays.integer_array([1, 2]),
"B": pd.core.arrays.integer_array([3, 4])
}, index=['a', 'b'])
expected = pd.Series(pd.core.arrays.integer_array([1, 3]),
index=['A', 'B'], name='a')
result = df.loc['a']
tm.assert_series_equal(result, expected)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
def test_extension_array_cross_section_converts():
df = pd.DataFrame({
"A": pd.core.arrays.integer_array([1, 2]),
"B": np.array([1, 2]),
}, index=['a', 'b'])
result = df.loc['a']
expected = pd.Series([1, 1], dtype=object, index=['A', 'B'], name='a')
tm.assert_series_equal(result, expected)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
mcleonard/sampyl | examples/slice_sample.py | 2 | 1708 | import sys
sys.path.append('.')
import sampyl as smp
from sampyl.state import State
from sampyl import np
from sampyl.diagnostics import diagnostics
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
# correlated gaussian log likelihood
def logp(x, y):
icov = np.linalg.inv(np.array([[1., .8], [.8, 1.]]))
d = np.array([x, y])
return -.5 * np.dot(np.dot(d, icov), d)
logp_xy = lambda th: logp(th[0], th[1])
# compare slice samplers, metropolis hastings, and the two variable
# slice sampler
ssamp = smp.Slice(logp, start={'x': 4., 'y': 4.} )
slice_trace = ssamp.sample(1000)
met = smp.Metropolis(logp, start={'x': 4., 'y': 4.})
met_trace = met.sample(1000)
bslice = smp.Slice(logp_xy, start={'th': np.array([4., 4.])})
btrace = bslice.sample(1000)
# compute effective sample size based on autocorrelation
slice_eff = diagnostics.compute_n_eff_acf(slice_trace.x)
met_eff = diagnostics.compute_n_eff_acf(met_trace.x)
b_eff = diagnostics.compute_n_eff_acf(btrace.th[:,0])
print "Slice effective sample size: %2.2f"%slice_eff
print "MH effective sample size: %2.2f"%met_eff
print "two var slice effective sample size: %2.2f"%b_eff
print " ----- "
print "Slice sampler evals per sample: ", ssamp.evals_per_sample
# graphically compare samples
fig, axarr = plt.subplots(1, 3, figsize=(12,4))
axarr[0].scatter(slice_trace.x, slice_trace.y)
axarr[0].set_title("Slice samples")
axarr[1].scatter(met_trace.x, met_trace.y)
axarr[1].set_title("MH samples")
axarr[2].scatter(btrace.th[:,0], btrace.th[:,1])
axarr[2].set_title("Two var Slice samples")
for ax in axarr:
ax.set_xlim((-4, 4))
ax.set_ylim((-4, 4))
plt.show()
| mit |
astroML/astroML | examples/datasets/plot_LIGO_spectrum.py | 2 | 2514 | """
Plot the power spectrum of LIGO
-------------------------------
This compares the power spectrum computed using the raw FFT, and using
Welch's method (i.e. overlapping window functions that reduce noise).
The top panel shows the raw signal, which is the measurements of the
change in baseline length. The bottom panel shows the raw and smoothed
power spectrum, used by the LIGO team to characterize the noise
of the detector. The particular data used here is the injected
`Big Dog <http://www.ligo.org/news/blind-injection.php>`_ event.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from scipy import fftpack
from matplotlib import mlab
from astroML.datasets import fetch_LIGO_large
#------------------------------------------------------------
# Fetch the LIGO hanford data
data, dt = fetch_LIGO_large()
# subset of the data to plot
t0 = 646
T = 2
tplot = dt * np.arange(T * 4096)
dplot = data[4096 * t0: 4096 * (t0 + T)]
tplot = tplot[::10]
dplot = dplot[::10]
fmin = 40
fmax = 2060
#------------------------------------------------------------
# compute PSD using simple FFT
N = len(data)
df = 1. / (N * dt)
PSD = abs(dt * fftpack.fft(data)[:N // 2]) ** 2
f = df * np.arange(N / 2)
cutoff = ((f >= fmin) & (f <= fmax))
f = f[cutoff]
PSD = PSD[cutoff]
f = f[::100]
PSD = PSD[::100]
#------------------------------------------------------------
# compute PSD using Welch's method -- hanning window function
PSDW2, fW2 = mlab.psd(data, NFFT=4096, Fs=1. / dt,
window=mlab.window_hanning, noverlap=2048)
dfW2 = fW2[1] - fW2[0]
cutoff = (fW2 >= fmin) & (fW2 <= fmax)
fW2 = fW2[cutoff]
PSDW2 = PSDW2[cutoff]
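# Editorial comment (not in the original example): the raw FFT above has a
# frequency resolution of df = 1/(N*dt) over the whole record, whereas Welch's
# method uses segments of NFFT=4096 samples, so dfW2 ~ 1/(4096*dt); averaging the
# overlapping, Hanning-windowed segments is what reduces the variance of the PSD.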
#------------------------------------------------------------
# Plot the data
fig = plt.figure()
fig.subplots_adjust(bottom=0.1, top=0.9, hspace=0.3)
# top panel: time series
ax = fig.add_subplot(211)
ax.plot(tplot, dplot, '-k')
ax.set_xlabel('time (s)')
ax.set_ylabel('$h(t)$')
ax.set_ylim(-1.2E-18, 1.2E-18)
# bottom panel: hanning window
ax = fig.add_subplot(212)
ax.loglog(f, PSD, '-', c='#AAAAAA')
ax.loglog(fW2, PSDW2, '-k')
ax.text(0.98, 0.95, "Hanning (cosine) window",
ha='right', va='top', transform=ax.transAxes)
ax.set_xlabel('frequency (Hz)')
ax.set_ylabel(r'$PSD(f)$')
ax.set_xlim(40, 2060)
ax.set_ylim(1E-46, 1E-36)
ax.yaxis.set_major_locator(plt.LogLocator(base=100))
plt.show()
| bsd-2-clause |
johnmwalters/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
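# Editorial sketch (not part of the original solution): the "percentile-based
# statistics" mentioned in the module docstring amount to reading the lognormal
# parameters off the log10 CDF, which is what MakeFigures() does below (the names
# mu/sigma here are illustrative; the code uses median/std):
#
#     mu = cdf_log.Percentile(50)                            # median of log10(income)
#     iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
#     sigma = iqr / 1.349                                    # IQR of a normal is ~1.349 sigma
#
# before sigma is overridden to 0.35 to match the upper tail.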
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
jseabold/scikit-learn | sklearn/_build_utils/__init__.py | 21 | 1125 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'sklearn'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
from numpy.distutils.system_info import get_info
def get_blas_info():
def atlas_not_found(blas_info_):
def_macros = blas_info.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
| bsd-3-clause |
DistrictDataLabs/yellowbrick | yellowbrick/bestfit.py | 1 | 9446 | # yellowbrick.bestfit
# Uses Scikit-Learn to compute a best fit function, then draws it in the plot.
#
# Author: Benjamin Bengfort
# Created: Sun Jun 26 17:27:08 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: bestfit.py [56236f3] benjamin@bengfort.com $
"""
Uses Scikit-Learn to compute a best fit function, then draws it in the plot.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error as mse
from operator import itemgetter
from yellowbrick.style.palettes import LINE_COLOR
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Module Constants
##########################################################################
# Names of the various estimator functions
LINEAR = "linear"
QUADRATIC = "quadratic"
EXPONENTIAL = "exponential"
LOG = "log"
SELECT_BEST = "select_best"
##########################################################################
## Draw Line of Best Fit
##########################################################################
def draw_best_fit(X, y, ax, estimator="linear", **kwargs):
"""
Uses Scikit-Learn to fit a model to X and y then uses the resulting model
to predict the curve based on the X values. This curve is drawn to the ax
(matplotlib axis) which must be passed as the third variable.
The estimator function can be one of the following:
- ``'linear'``: Uses OLS to fit the regression
- ``'quadratic'``: Uses OLS with Polynomial order 2
- ``'exponential'``: Not implemented yet
- ``'log'``: Not implemented yet
- ``'select_best'``: Selects the best fit via MSE
The remaining keyword arguments are passed to ax.plot to define and
describe the line of best fit.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
estimator : string, default: 'linear'
The name of the estimator function used to draw the best fit line.
The estimator can currently be one of linear, quadratic, exponential,
log, or select_best. The select best method uses the minimum MSE to
select the best fit line.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style and
label the line of best fit. By default, the standard line color is
used unless the color keyword argument is passed in.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
"""
# Estimators are the types of best fit lines that can be drawn.
estimators = {
LINEAR: fit_linear, # Uses OLS to fit the regression
QUADRATIC: fit_quadratic, # Uses OLS with Polynomial order 2
EXPONENTIAL: fit_exponential, # Not implemented yet
LOG: fit_log, # Not implemented yet
SELECT_BEST: fit_select_best, # Selects the best fit via MSE
}
# Check to make sure that a correct estimator value was passed in.
if estimator not in estimators:
raise YellowbrickValueError(
"'{}' not a valid type of estimator; choose from {}".format(
estimator, ", ".join(estimators.keys())
)
)
# Then collect the estimator function from the mapping.
estimator = estimators[estimator]
# Ensure that X and y are the same length
if len(X) != len(y):
raise YellowbrickValueError(
(
"X and y must have same length:" " X len {} doesn't match y len {}!"
).format(len(X), len(y))
)
# Ensure that X and y are np.arrays
X = np.array(X)
y = np.array(y)
    # Verify that X is a two dimensional array for Scikit-Learn estimators
# and that its dimensions are (n, 1) where n is the number of rows.
if X.ndim < 2:
X = X[:, np.newaxis] # Reshape X into the correct dimensions
if X.ndim > 2:
raise YellowbrickValueError(
"X must be a (1,) or (n,1) dimensional array not {}".format(X.shape)
)
# Verify that y is a (n,) dimensional array
if y.ndim > 1:
raise YellowbrickValueError(
"y must be a (1,) dimensional array not {}".format(y.shape)
)
# Uses the estimator to fit the data and get the model back.
model = estimator(X, y)
# Set the color if not passed in.
if "c" not in kwargs and "color" not in kwargs:
kwargs["color"] = LINE_COLOR
# Get the current working axes
ax = ax or plt.gca()
# Plot line of best fit onto the axes that were passed in.
# TODO: determine if xlim or X.min(), X.max() are better params
xr = np.linspace(*ax.get_xlim(), num=100)
ax.plot(xr, model.predict(xr[:, np.newaxis]), **kwargs)
return ax
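# Minimal usage sketch (editorial addition; the synthetic data is illustrative
# only and not part of the library):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from yellowbrick.bestfit import draw_best_fit
#
#     X = np.random.uniform(0, 10, 100)
#     y = 2 * X + np.random.normal(0, 1, 100)
#     _, ax = plt.subplots()
#     ax.scatter(X, y)
#     draw_best_fit(X, y, ax, estimator="select_best")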
##########################################################################
## Estimator Functions
##########################################################################
def fit_select_best(X, y):
"""
Selects the best fit of the estimators already implemented by choosing the
model with the smallest mean square error metric for the trained values.
"""
models = [fit(X, y) for fit in [fit_linear, fit_quadratic]]
errors = map(lambda model: mse(y, model.predict(X)), models)
return min(zip(models, errors), key=itemgetter(1))[0]
def fit_linear(X, y):
"""
Uses OLS to fit the regression.
"""
model = linear_model.LinearRegression()
model.fit(X, y)
return model
def fit_quadratic(X, y):
"""
Uses OLS with Polynomial order 2.
"""
model = make_pipeline(PolynomialFeatures(2), linear_model.LinearRegression())
model.fit(X, y)
return model
def fit_exponential(X, y):
"""
Fits an exponential curve to the data.
"""
raise NotImplementedError("Exponential best fit lines are not implemented")
def fit_log(X, y):
"""
    Fit a logarithmic curve to the data.
"""
    raise NotImplementedError("Logarithmic best fit lines are not implemented")
##########################################################################
## Draw 45 Degree Line
##########################################################################
def draw_identity_line(ax=None, dynamic=True, **kwargs):
"""
Draws a 45 degree identity line such that y=x for all points within the
    given axes x and y limits. This function also registers a callback so
that as the figure is modified, the axes are updated and the line remains
drawn correctly.
Parameters
----------
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
dynamic : bool, default : True
If the plot is dynamic, callbacks will be registered to update the
        identity line as axes are changed.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style the
identity line.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
Notes
-----
.. seealso:: `StackOverflow discussion: Does matplotlib have a function for drawing diagonal lines in axis coordinates? <https://stackoverflow.com/questions/22104256/does-matplotlib-have-a-function-for-drawing-diagonal-lines-in-axis-coordinates>`_
"""
# Get the current working axes
ax = ax or plt.gca()
# Define the standard line color
if "c" not in kwargs and "color" not in kwargs:
kwargs["color"] = LINE_COLOR
# Define the standard opacity
if "alpha" not in kwargs:
kwargs["alpha"] = 0.5
# Draw the identity line
identity, = ax.plot([], [], **kwargs)
# Define the callback
def callback(ax):
# Get the x and y limits on the axes
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Set the bounding range of the line
data = (max(xlim[0], ylim[0]), min(xlim[1], ylim[1]))
identity.set_data(data, data)
# Register the callback and return
callback(ax)
if dynamic:
ax.callbacks.connect("xlim_changed", callback)
ax.callbacks.connect("ylim_changed", callback)
return ax
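# Minimal usage sketch (editorial addition; data purely illustrative):
#
#     _, ax = plt.subplots()
#     ax.scatter([1, 2, 3], [1.1, 1.9, 3.2])
#     draw_identity_line(ax=ax)   # y = x reference line, kept in sync when zooming
#     plt.show()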
if __name__ == "__main__":
import os
import pandas as pd
path = os.path.join(
os.path.dirname(__file__), "..", "examples", "data", "concrete.xls"
)
if not os.path.exists(path):
raise Exception("Could not find path for testing")
xkey = "Fine Aggregate (component 7)(kg in a m^3 mixture)"
ykey = "Coarse Aggregate (component 6)(kg in a m^3 mixture)"
data = pd.read_excel(path)
fig, axe = plt.subplots()
axe.scatter(data[xkey], data[ykey])
draw_best_fit(data[xkey], data[ykey], axe, "select_best")
plt.show()
| apache-2.0 |
DailyActie/Surrogate-Model | website/taihu/run_moea_d3d.py | 1 | 33680 | #!/usr/bin/python
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
# 0 --py:Success::
# 1 --py:Warning::
# 2 --py:Error::
# --py:Start\t['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+']::
# --py:End\t['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+']::
# --py:Test::
import os, sys, getopt, datetime
import warnings
warnings.filterwarnings(action="ignore", category=Warning)
import numpy as np
from copy import deepcopy
import random
random.seed(0.5)
# from sklearn.preprocessing import StandardScaler, MinMaxScaler
sys.path.append("..")
from d3d_read_map import objfunCost
from surrogate.base import Individual
from surrogate.selection import selNSGA2, selTournamentDCD
from surrogate.crossover import cxSimulatedBinaryBounded
from surrogate.mutation import mutPolynomialBounded
from surrogate.sampling import samRandom
# from surrogate.sampling import samBeta, samUniform
# from surrogate import benchmarks
# from surrogate.estimator import ANNSurrogate
from surrogate.estimator import delft3dWAQ
from surrogate.files import jsonMOEA, decvarMOEA
from hashlib import sha1
def main(argv):
"""
:param argv:
:return:
"""
# run.sh: icaseStart=1, icaseEnd=2
# d3d_read_map.py: taihuDir, caseName, varName, iseg, itime
# d3d_create_inp.py: blockDir, blockFname, inp00Fname, tempFname
_Ngen = 0
_Ndim = 0
_Npop = 0
_Nobj = 0
_Ncon = 0
CXPB = 0.0
try:
opts, args = getopt.getopt(argv,"hg:d:p:o:c:x:",["gen=","dim=","pop=","obj=","con=","cxpb"])
except getopt.GetoptError:
print sys.argv[0]+' -g <gen> -d <dim> -p <pop> -o <obj> -c <con> -x <cxpb>'
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print sys.argv[0]+' -g <gen> -d <dim> -p <pop> -o <obj> -c <con> -x <cxpb>'
sys.exit()
elif opt in ("-g", "--gen"):
_Ngen = int(arg)
elif opt in ("-d", "--dim"):
_Ndim = int(arg)
elif opt in ("-p", "--pop"):
_Npop = int(arg)
elif opt in ("-o", "--obj"):
_Nobj = int(arg)
elif opt in ("-c", "--con"):
_Ncon = int(arg)
elif opt in ("-x", "--cxpb"):
CXPB = float(arg)
print '--py:Start::['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+'] run_moea.py'
if _Ngen > 0 and _Ndim > 0 and _Npop > 0 and _Nobj > 0 and _Ncon >= 0 and CXPB >= 0.0:
moeaLoop(_Ngen, _Ndim, _Npop, _Nobj, _Ncon, CXPB)
else:
print '--py:Error:: '
print '--py:End:: ['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+']'
def meshgridMD(group=[], grids=[]):
"""
    :param group: list mapping each decision variable to a dimension index of grids
    :param grids: list of candidate-value lists, one per grid dimension
    :return: (index, value, variables) for every enumerated grid combination
"""
# irow = 0
# for ii in grids[0]:
# for ij in grids[1]:
# variable = [-9999 for x in range(Ndim)]
#
# variable[0] = ii
# variable[1] = ij
#
# irow += 1
# print str(irow)+'\t'+'\t'.join(map(str, variable))
#
# # variables.append(variable)
ranges = []
index = []
for igrid in range(len(grids)):
ranges.append([0, len(grids[igrid])])
from operator import mul
operations=reduce(mul,(p[1]-p[0] for p in ranges))-1
result=[i[0] for i in ranges]
# print result
index.append([i for i in result])
pos=len(ranges)-1
increments=0
while increments < operations:
if result[pos]==ranges[pos][1]-1:
result[pos]=ranges[pos][0]
pos-=1
else:
result[pos]+=1
increments+=1
pos=len(ranges)-1 #increment the innermost loop
# print result
index.append([i for i in result])
value = []
variables = []
for irow in range(len(index)):
# print str(irow)
# print '\t['+'\t'.join(map(str,index[irow]))+']'
value.append([grids[idim][igrid] for idim,igrid in enumerate(index[irow])])
# print '\t['+'\t'.join(map(str,value[irow]))+']'
variables.append([])
for icol in group:
variables[irow].append(value[irow][icol])
# print '\t['+'\t'.join(map(str,variables[irow]))+']'
# print '\tvariables = ['
# for irow in range(0,len(variables)-1):
# print '\t\t['+','.join(map(str,variables[irow]))+'],'
# irow = len(variables)-1
# print '\t\t['+','.join(map(str,variables[irow]))+']'
# print '\t]'
return index,value,variables
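# Editorial usage note (not part of the original script): grids holds the
# candidate values per grid dimension and group maps every decision variable to
# one of those dimensions, e.g.
#
#     index, value, variables = meshgridMD(group=[0, 1, 1], grids=[[0.2, 0.8], [0.5]])
#
# enumerates the grid combinations odometer-style and returns, per combination,
# the dimension indices (index), the grid values (value) and the expanded
# decision-variable vectors (variables, here of length 3).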
def Population(numPop=4, numVar=10, estimator=delft3dWAQ, weights=(-1.0, -1.0)):
"""Population
:param numPop:
:param numVar:
:param estimator:
:param weights:
:return:
"""
constraint = []
Individuals = []
variables = []
# group,grids,index,value,variables = [],[],[],[],[]
# group = [
# 0,1,2,2,2,2,2,3,3,3,3,0,0,0,0,0,4,4,4,4,4,0,0,0,0,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
# 0,1,2,2,2,2,2,3,3,3,3,0,0,0,0,0,4,4,4,4,4,0,0,0,0,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
# ]
# grids = [
# [1.0],
# [1.0],
# [1.0],
# [1.0],
# [1.0],
# [1.0]
# ]
# index,value,variables = meshgridMD(group,grids)
for i in range(numPop):
# variable = variables[0]
# variable = variables[i]
variable = samRandom(n=numVar)
# variable = samBeta(a=0.1, b=0.1, size=numVar)
# variable = samUniform(low=0.0, high=1.0, size=numVar).tolist()
# print '--py:Test::\t[' + ','.join(map("{:.5f}".format, variable)) + '],'
Individuals.append(Individual(estimator=estimator, variable=variable, constraint=constraint, weights=weights))
return Individuals
def delwaq(caseDir, casePref, icaseStart=99999999, icaseEnd=99999999, icase=0):
print ''
print '--py:Start::['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+'] os.system('+str(icaseStart)+' '+str(icaseEnd)+'), icase: '+str(icase)+''
os.system('./run.sh '+str(icaseStart)+' '+str(icaseEnd)+' '+caseDir+' '+casePref)
caseName = casePref+"%08d" % icaseStart
objfunFname = '/var/www/html/taihu'+'/'+caseDir+'/'+caseName+'/taihu_objfun.txt'
with open(objfunFname, 'r') as objfunFref:
# 20171110 before, without cost
# [obj1, obj2, iseg, itime] = [float(elt.strip()) for elt in objfunFref.readline().split('\t')]
# 20171110, with cost
[obj1, obj2, obj3, iseg, itime] = [float(elt.strip()) for elt in objfunFref.readline().split('\t')]
print '--py:End:: ['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+'] os.system('+str(obj1)+' '+str(obj2)+'), icase: '+str(icase)+''
print ''
# 20171110 before, without cost
# return obj1,obj2
# 20171110, with cost
return obj1,obj2,obj3
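# Editorial comment (added): getSSD below drives the adaptive sampling of the
# (currently commented-out) ANGA surrogate loop. It scales the decision
# variables, takes the mean per-individual standard deviation sd, smooths it
# exponentially into ssd with the given factor, and rescales the previous
# generation's sampling fraction (x_ssd_1 = [n, s, sd, ssd]) by ssd/ssd_previous
# to obtain the new fraction s and sample count n.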
def getSSD(scaler,variable,x_ssd_1,factor=0.5):
numPop = len(variable)
numVar = len(variable[0])
# sd = np.sum(np.std(variable, axis=1))/float(numPop)
sd = np.sum(np.std(scaler.transform(variable), axis=1))/float(numPop)
ssd = x_ssd_1[3]+factor*(sd-x_ssd_1[3])
s = ssd/x_ssd_1[3]*x_ssd_1[1]
n = int(s*numPop)
return [n,s,sd,ssd]
def moeaLoop(_Ngen, _Ndim, _Npop, _Nobj, _Ncon, CXPB):
# # --sh:[icaseStart = 1, icaseEnd = 2] => [t00000001, t00000002]
# os.system('./run.sh '+str(icaseStart)+' '+str(icaseEnd))
#
# createdir "$casedir"
# createD3Dinp "$blockdir" "$blockfname" "$decvarfname" "$inp00fname" "$tempfname"
# copyfile "$decvarfname" "$casedir/taihu_decvvar.txt"
# movefile "$tempfname" "$inpfname"
# rundelwaq "$exedelwaqdir" "$casedir" "$inpfname" "$bloomfname" "$procfname"
print '--py:MOEA:: Init moeaLoop('+str(_Ngen)+', '+str(_Ndim)+', '+str(_Npop)+', '+str(_Nobj)+', '+str(_Ncon)+', '+str(CXPB)+')'
jsonFname = 'result/moea/taihu.json'
varDir = 'deltemp'
caseDir = 'result'
casePref = 'moea'
_INF = 1e-14
weights = (-1.0, -1.0)
print '--py:MOEA:: Init estimator'
estimator = delft3dWAQ
# estimator = benchmarks.zdt3
# os.system('./run.sh '+str(icaseStart)+' '+str(icaseEnd))
# print '--py:MOEA:: Init ANNSurrogate'
# # import sklearn
# # # print('The scikit-learn version is {}.'.format(sklearn.__version__))
# # # surrogate = ANNSurrogate(algorithm='lbfgs', alpha=1e-5, hidden_layer_sizes=(8), random_state=1)
# surrogate = ANNSurrogate(algorithm='lbfgs', activation='relu', hidden_layer_sizes=(70), batch_size=10, random_state=5)
# import cPickle as pickle
# surrogate = pickle.load(open('/var/www/html/taihu/mlmodel/d3d_ann_moea.pkl', 'rb'))
# # surrogate.fit(X_train, Y_train)
# # surrogate = ANNSurrogate(algorithm='sgd', alpha=1e-5, hidden_layer_sizes=(8), random_state=1)
# # surrogate.partial_fit(X_train, Y_train)
"""algorithm => solver"""
"""
0.*.* init size of training dataset
1.*.* igen = 0
2.*.* igen in range[1, _Ngen]
*.1.* initiate X
0 Xold_ind
1 Xnew_ind
*.2.* predict
0 delwaq
1 ann
2 updated ann
*.3.* set Y
0 Yold_obj
1 Ynew_obj
*.4.* print
0 Xold_ind, Yold_obj
1 Xnew_ind, Ynew_obj
"""
"""0.1.0"""
Xold_ind, Yold_obj = np.zeros([_Npop, _Ndim]), np.zeros([_Npop, _Nobj])
"""0.1.1
ANGA needs to initiate step: 1.1.1 and 1.3.1
"""
# Xnew_ind, Ynew_obj = [], []
"""0.1.2
ANGA needs to initiate step: 1.1.1 and 1.3.1
ANGA needs to be updated by new training dataset
"""
# Xnew_ind, Ynew_obj > [_Npop, _Ndim], [_Npop, _Nobj]
Xnew_ind, Ynew_obj = [], []
# hash table
# print(sha1(np.reshape([X for X in [0.1,0.2,0.3]],(1,-1))).hexdigest())
Htbl_Xnew_ind, Htbl_Ynew_obj = [], []
ipop_new = 0
X_n0, X_S0, X_SD0, X_SSD0 = _Npop, 1.0, 0.0, 1.0
"""objtype: 12.obj12; 13.obj13; 23.obj23"""
if _Nobj == 1:
weights = (-1.0, -1.0)
objtype = 1
# objtype = 2
# objtype = 3
elif _Nobj == 2:
weights = (-1.0, -1.0)
# objtype = 12
# objtype = 13
objtype = 23
elif _Nobj == 3:
weights = (-1.0, -1.0, -1.0)
objtype = 123
else:
weights = (-1.0, -1.0)
objtype = 12
# print str(objtype)
# TODO NSGA2-0 initiate
igen = 0
print '\n'
print '--py:MOEA:: ['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+'] Gen: ' + str(igen)
population = Population(numPop=_Npop, numVar=_Ndim, estimator=estimator, weights=weights)
ioDecVarFile = decvarMOEA(varDir=varDir, casePref=casePref, numVar=_Ndim, numPop=_Npop, numCon=_Ncon, numObj=_Nobj, numGen=_Ngen)
ioResultFile = jsonMOEA(fileName=jsonFname, numVar=_Ndim, numPop=_Npop, numCon=_Ncon, numObj=_Nobj, numGen=_Ngen)
ioResultFile.writeHeader()
# TODO NSGA2-1 delwaq or ann
print '--py:MOEA::\tDecision Variable'
ioDecVarFile.writeHeader(igen=igen)
icaseStart = ioDecVarFile.icase + 1
for ipop in range(_Npop):
"""1.1.0-initiate training dataset: Xold_ind"""
Xold_ind[ipop] = [deepcopy(X) for X in population[ipop].variable]
# TODO 20170203 test set ipop == 0 : DecVar = 0.0
# if ipop == 0:
# Xold_ind[ipop] = Xold_ind[ipop]*0.0
ioDecVarFile.writeDecVar(variable=Xold_ind[ipop], ipop=ipop)
ioDecVarFile.writeEnd()
icaseEnd = ioDecVarFile.icase
# print '--py:MOEA:: icase\tStart['+str(_Npop)+'*'+str(igen)+'+1]: '+str(icaseStart)+'\tEnd['+str(_Npop)+'*('+str(igen)+'+1)]: '+str(icaseEnd)
print '--py:MOEA::\tFitness Variable'
for ipop in range(_Npop):
issm = icaseStart+ipop
iesm = icaseStart+ipop
# print str(ipop)+'\t=========='
"""1.2.0-predict by delwaq with cost"""
obj1,obj2,obj3 = delwaq(caseDir=caseDir, casePref=casePref, icaseStart=issm, icaseEnd=iesm, icase=ipop)
"""1.2.1-predict by ann and calculate cost"""
# [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(np.reshape(Xold_ind[ipop],(1,-1)))[0]]
# obj3 = objfunCost("deltemp/moea%08i.txt" % issm)
"""1.3.0-set training dataset: Yold_obj"""
if objtype==1:
population[ipop].fitness.values = [deepcopy(Y) for Y in [obj1]]
if objtype==2:
population[ipop].fitness.values = [deepcopy(Y) for Y in [obj2]]
if objtype==3:
population[ipop].fitness.values = [deepcopy(Y) for Y in [obj3]]
if objtype==12:
population[ipop].fitness.values = [deepcopy(Y) for Y in [obj1,obj2]]
if objtype==13:
population[ipop].fitness.values = [deepcopy(Y) for Y in [obj1,obj3]]
if objtype==23:
population[ipop].fitness.values = [deepcopy(Y) for Y in [obj2,obj3]]
if objtype==123:
population[ipop].fitness.values = [deepcopy(Y) for Y in [obj1, obj2,obj3]]
Yold_obj[ipop] = [deepcopy(Y) for Y in population[ipop].fitness.values]
"""1.4.0-print result training dataset: Xold_ind, Yold_obj"""
# print '\tXold_ind: [' + '\t'.join(map("{:.5f}".format, Xold_ind[ipop])) + ']' \
# + '\n\tMean_X: ' + str(np.mean(Xold_ind[ipop])) \
# + '\n\tStd_X: ' + str(np.std(Xold_ind[ipop])) \
# + '\n\tYold_obj: [' + '\t'.join(map("{:.5f}".format, Yold_obj[ipop])) + ']'
# # TODO ANGA-1.0 initiation, NSGA2_igen=0
# print '--py:ANGA::\tDecision Variable'
# X_scaler = StandardScaler()
# # X_scaler = MinMaxScaler()
# X_scaler.fit(Xold_ind)
# # # print(X_scaler.mean_)
# # # print(X_scaler.var_)
# X_n0 = _Npop
# X_S0 = 1.0
# X_SD0 = 1.0
# # # X_SD0 = np.sum(np.std(Xold_ind, axis=1))/float(_Npop)
# # X_SD0 = np.sum(np.std(X_scaler.transform(Xold_ind), axis=1))/float(_Npop)
# # # X_scaler.fit(np.transpose(Xold_ind))
# # # X_SD0 = np.sum(np.std(X_scaler.transform(np.transpose(Xold_ind)), axis=0))/float(_Npop)
# X_SSD0 = X_SD0
# X_SSD = [[X_n0, X_S0, X_SD0, X_SSD0]]
# print str(igen)+'\tX_SSD: [' + '\t'.join(map("{:.5f}".format, X_SSD[igen])) + ']'
#
# print '--py:ANGA::\tSurrogate Model'
# # surrogate.fit(Xold_ind, Yold_obj)
# # surrogate.partial_fit(Xold_ind, Yold_obj)
# print '--py:ANGA::\tSurrogate Model'
# surrogate.fit(X_scaler.transform(Xold_ind), Yold_obj)
# # surrogate.partial_fit(X_scaler.transform(Xold_ind), Yold_obj)
#
# print '--py:ANGA::\tFitness Variable'
# for ipop in range(_Npop):
# # print str(ipop)+'\t=========='
# """1.1.1-initiate training dataset: Xnew_ind"""
# # Xnew_ind.append([deepcopy(X) for X in samBeta(a=0.1, b=0.1, size=_Ndim)])
# Xnew_ind.append([deepcopy(X) for X in Xold_ind[ipop]])
# """1.2.1-predict by ann and calculate cost"""
# [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(np.reshape(Xnew_ind[ipop],(1,-1)))[0]]
# # [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(X_scaler.transform(np.reshape(Xnew_ind[ipop],(1,-1))))[0]]
# obj3 = objfunCost("deltemp/moea%08i.txt" % issm)
# """1.3.1-set training dataset: Ynew_obj"""
# if objtype==1:
# Ynew_obj.append([deepcopy(Y) for Y in [obj1]])
# if objtype==2:
# Ynew_obj.append([deepcopy(Y) for Y in [obj2]])
# if objtype==3:
# Ynew_obj.append([deepcopy(Y) for Y in [obj3]])
# if objtype==12:
# Ynew_obj.append([deepcopy(Y) for Y in [obj1,obj2]])
# if objtype==13:
# Ynew_obj.append([deepcopy(Y) for Y in [obj1,obj3]])
# if objtype==23:
# Ynew_obj.append([deepcopy(Y) for Y in [obj2,obj3]])
# if objtype==123:
# Ynew_obj.append([deepcopy(Y) for Y in [obj1, obj2,obj3]])
# """1.4.1-print result training dataset: Xnew_ind, Ynew_obj"""
# # print '\tXnew_ind: [' + '\t'.join(map("{:.5f}".format, Xnew_ind[ipop])) + ']' \
# # + '\n\tMean_X: ' + str(np.mean(Xnew_ind[ipop])) \
# # + '\n\tStd_X: ' + str(np.std(Xnew_ind[ipop])) \
# # + '\n\tYnew_obj: [' + '\t'.join(map("{:.5f}".format, Ynew_obj[ipop])) + ']'
# """1.5.1-hash table: Htbl_Xnew_ind, Htbl_Ynew_obj"""
# Htbl_Xnew_ind.append(sha1(np.reshape(Xold_ind[ipop],(1,-1))).hexdigest())
# Htbl_Ynew_obj.append(sha1(np.reshape(Yold_obj[ipop],(1,-1))).hexdigest())
# # print '\tHtbl_Xnew_ind: [' + Htbl_Xnew_ind[ipop] + ']' \
# # + '\n\tHtbl_Ynew_obj: [' + Htbl_Ynew_obj[ipop] + ']'
# TODO NSGA2-2 main loop, NSGA2_igen>0
print '--py:MOEA::\tNSGA2 Selection'
population = selNSGA2(population, _Npop)
ioResultFile.writePareto(individuals=population, igen=igen)
# print str(igen) + '\tGen:'
# for ipop in population:
# print '\tpopulation.sel.a'\
# + '\tvar1: [' + ', '.join(map("{:.5f}".format, ipop.variable)) + ']'
# print
for igen in range(1, _Ngen):
# TODO NSGA2-2.1 offspring
print '\n'
print '--py:MOEA:: ['+datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+'] Gen: ' + str(igen)
# print '\n' + str(igen) + '\tGen:'
# for ipop in population:
# print '\tpopulation.sel.b'\
# + '\tvar: [' + ', '.join(map("{:.5f}".format, ipop.variable)) + ']'\
# + '\tobj: [' + ', '.join(map("{:.5f}".format, ipop.fitness.values)) + ']'\
# + '\tcrw: [' + str(ipop.fitness.crowding_dist) + ']'
print '--py:MOEA::\tNSGA2 Tournament'
offspring = selTournamentDCD(population, _Npop)
# for ipop in offspring:
# print '\toffspring.sel.a'\
# + '\tvar: [' + ', '.join(map("{:.5f}".format, ipop.variable)) + ']'\
# + '\tobj: [' + ', '.join(map("{:.5f}".format, ipop.fitness.values)) + ']'\
# + '\tcrw: [' + str(ipop.fitness.crowding_dist) + ']'
# print
print '--py:MOEA::\tNSGA2 Offspring'
offspring = [deepcopy(ind) for ind in offspring]
# for ipop in offspring:
# print '\toffspring.sel.a'\
# + '\tvar: [' + ', '.join(map("{:.5f}".format, ipop.variable)) + ']'\
# + '\tobj: [' + ', '.join(map("{:.5f}".format, ipop.fitness.values)) + ']'\
# + '\tcrw: [' + str(ipop.fitness.crowding_dist) + ']'
# print
print '--py:MOEA::\tCrossover of offspring with CXPB: '+str(CXPB)
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
# print '\toffspring.cx.b'\
# + '\tvar1: [' + ', '.join(map("{:.5f}".format, ind1.variable)) + ']'\
# + '\tvar2: [' + ', '.join(map("{:.5f}".format, ind2.variable)) + ']'
ind1.variable, ind2.variable = cxSimulatedBinaryBounded(ind1.variable, ind2.variable)
# print '\toffspring.cx.a'\
# + '\tvar1: [' + ', '.join(map("{:.5f}".format, ind1.variable)) + ']'\
# + '\tvar2: [' + ', '.join(map("{:.5f}".format, ind2.variable)) + ']'
# print '\toffspring.mut.b'\
# + '\tvar1: [' + ', '.join(map("{:.5f}".format, ind1.variable)) + ']'\
# + '\tvar2: [' + ', '.join(map("{:.5f}".format, ind2.variable)) + ']'
ind1.variable = mutPolynomialBounded(ind1.variable)
ind2.variable = mutPolynomialBounded(ind2.variable)
# print '\toffspring.mut.a'\
# + '\tvar1: [' + ', '.join(map("{:.5f}".format, ind1.variable)) + ']'\
# + '\tvar2: [' + ', '.join(map("{:.5f}".format, ind2.variable)) + ']'
# print
del ind1.fitness.values, ind2.fitness.values
print '--py:MOEA::\tEvaluate the individuals with an invalid fitness'
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
# TODO NSGA2-2.2 delwaq or ann
print '--py:MOEA::\tDecision Variable'
ioDecVarFile.writeHeader(igen=igen)
icaseStart = ioDecVarFile.icase + 1
ipop = 0
for ind in invalid_ind:
"""2.1.0-initiate training dataset: Xold_ind"""
Xold_ind[ipop] = [deepcopy(X) for X in ind.variable]
ioDecVarFile.writeDecVar(variable=Xold_ind[ipop], ipop=ipop)
ipop += 1
ioDecVarFile.writeEnd()
icaseEnd = ioDecVarFile.icase
# print '--py:MOEA:: icase\tStart['+str(_Npop)+'*'+str(igen)+'+1]: '+str(icaseStart)+'\tEnd['+str(_Npop)+'*('+str(igen)+'+1)]: '+str(icaseEnd)
# TODO ANGA-1.1 delwaq or ann by sampling rate
print '--py:MOEA::\tFitness Variable'
ipop = 0
for ind in invalid_ind:
issm = icaseStart+ipop
iesm = icaseStart+ipop
# ind.fitness.values = estimator(ind.variable)
# print str(ipop)+'\t=========='
"""2.2.0-predict by delwaq with cost"""
obj1,obj2,obj3 = delwaq(caseDir=caseDir, casePref=casePref, icaseStart=issm, icaseEnd=iesm, icase=ipop)
"""2.2.1-predict by ann and calculate cost"""
# [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(np.reshape(Xold_ind[ipop],(1,-1)))[0]]
# obj3 = objfunCost("deltemp/moea%08i.txt" % issm)
"""2.3.0-set training dataset: Yold_obj"""
if objtype==1:
ind.fitness.values = [deepcopy(Y) for Y in [obj1]]
if objtype==2:
ind.fitness.values = [deepcopy(Y) for Y in [obj2]]
if objtype==3:
ind.fitness.values = [deepcopy(Y) for Y in [obj3]]
if objtype==12:
ind.fitness.values = [deepcopy(Y) for Y in [obj1,obj2]]
if objtype==13:
ind.fitness.values = [deepcopy(Y) for Y in [obj1,obj3]]
if objtype==23:
ind.fitness.values = [deepcopy(Y) for Y in [obj2,obj3]]
if objtype==123:
ind.fitness.values = [deepcopy(Y) for Y in [obj1, obj2,obj3]]
Yold_obj[ipop] = [deepcopy(Y) for Y in ind.fitness.values]
"""2.4.0-print result training dataset: Xold_ind, Yold_obj"""
# print '\tXold_ind: [' + '\t'.join(map("{:.5f}".format, Xold_ind[ipop])) + ']' \
# + '\n\tMean_X: ' + str(np.mean(Xold_ind[ipop])) \
# + '\n\tStd_X: ' + str(np.std(Xold_ind[ipop])) \
# + '\n\tYold_obj: [' + '\t'.join(map("{:.5f}".format, Yold_obj[ipop])) + ']'
ipop += 1
# # TODO 20170105 ANGA test
# ipop = 0
# for ind in invalid_ind:
# issm = icaseStart+ipop
# iesm = icaseStart+ipop
# # ind.fitness.values = estimator(ind.variable)
# """2.1.1-initiate training dataset: Xnew_ind"""
# Xnew_ind[ipop] = [deepcopy(X) for X in ind.variable]
# """2.2.1-predict by ann and calculate cost"""
# [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(np.reshape(Xnew_ind[ipop],(1,-1)))[0]]
# # [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(X_scaler.transform(np.reshape(Xnew_ind[ipop],(1,-1))))[0]]
# obj3 = objfunCost("deltemp/moea%08i.txt" % issm)
# """2.3.1-initiate training dataset: Ynew_obj"""
# if objtype==1:
# Ynew_obj[ipop] = [deepcopy(Y) for Y in [obj1]]
# if objtype==2:
# Ynew_obj[ipop] = [deepcopy(Y) for Y in [obj2]]
# if objtype==3:
# Ynew_obj[ipop] = [deepcopy(Y) for Y in [obj3]]
# if objtype==12:
# Ynew_obj[ipop] = [deepcopy(Y) for Y in [obj1,obj2]]
# if objtype==13:
# Ynew_obj[ipop] = [deepcopy(Y) for Y in [obj1,obj3]]
# if objtype==23:
# Ynew_obj[ipop] = [deepcopy(Y) for Y in [obj2,obj3]]
# if objtype==123:
# Ynew_obj[ipop] = [deepcopy(Y) for Y in [obj1, obj2,obj3]]
# """2.4.1-print result training dataset: Xnew_ind, Ynew_obj"""
# print '\t' + str(ipop) \
# + '\tXnew_ind: [' + '\t'.join(map("{:.5f}".format, Xnew_ind[ipop])) + ']' \
# + '\tMean_X: ' + str(np.mean(Xnew_ind[ipop])) \
# + '\tStd_X: ' + str(np.std(Xnew_ind[ipop])) \
# + '\tYnew_obj: [' + '\t'.join(map("{:.5f}".format, Ynew_obj[ipop])) + ']'
# ipop += 1
# # TODO ANGA-2 main, NSGA2_igen>0
# # implement fitness sampling rate & retraining ANN from anga [py:function:: estimator()]
# # Important: size Xnew_ind, Ynew_obj > [_Npop, _Ndim], [_Npop, _Nobj]
# #
# # find in varaible pool (cache)?
# # <Yes> retreive fitness from cache
# # Ynew_obj = ind.fitness.values
# # ind.fitness.values = Ynew_obj[0]
# # <No > model [estimator()]?
# # <Yes> model [estimator()]
# # ind.fitness.values = estimator(ind.variable)
# # <> update cache and training set of ANN
# # # Xold_ind[ipop] = [deepcopy(X) for X in ind.variable]
# # # Yold_obj = ind.fitness.values
# # <No > predict by ANN [ANN.predict()]
# # Ynew_obj = surrogate.predict(X_scaler.transform(Xnew_ind))
# # ind.fitness.values = Ynew_obj[0]
# #
# # retraining [ANN.fit()]?
# # <Yes> surrogate.fit(X_scaler.transform(Xold_ind), Yold_obj)
# # <No > ind.fitness.values = ind.fitness.values
# # TODO ANG-2.1 sampling rate
# print '--py:ANGA::\tUpdate Decision Variable and Fitness Variable'
# X_SSD.append(getSSD(scaler=X_scaler,variable=Xold_ind,x_ssd_1=X_SSD[igen-1]))
# print str(igen)+'\tX_SSD: [' + '\t'.join(map("{:.5f}".format, X_SSD[igen])) + ']'
# ipop = 0
# for ind in invalid_ind:
# # TODO ANGA-2.2 update retraining pool
# # print str(ipop)+'\t=========='
# """2.1.2-initiate updated training dataset: Xnew_ind, Ynew_obj"""
# if sha1(np.reshape(ind.variable,(1,-1))).hexdigest() in Htbl_Xnew_ind:
# print str(ipop_new)+'\t=========='
# Xnew_ind.append(ind.variable)
# Ynew_obj.append(ind.fitness.values)
# print '\tXnew_ind: [' + '\t'.join(map("{:.5f}".format, Xnew_ind[_Npop+ipop_new])) + ']' \
# + '\n\tMean_X: ' + str(np.mean(Xnew_ind[_Npop+ipop_new])) \
# + '\n\tStd_X: ' + str(np.std(Xnew_ind[_Npop+ipop_new])) \
# + '\n\tYnew_obj: [' + '\t'.join(map("{:.5f}".format, Ynew_obj[_Npop+ipop_new])) + ']'
# ipop_new += 1
# # TODO ANGA-2.3 update ANNSurrogate
# """2.2.2-predict by updated ann and calculate cost"""
# """2.2.2.a-update ann"""
# # surrogate.fit(Xnew_ind, Ynew_obj)
# # surrogate.partial_fit(Xnew_ind, Ynew_obj)
# # # surrogate.fit(X_scaler.transform(Xnew_ind), Ynew_obj)
# # # surrogate.partial_fit(X_scaler.transform(Xnew_ind), Ynew_obj)
# """2.2.2.b-predict by updated ann"""
# # [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(np.reshape(Xnew_ind[_Npop+ipop_new],(1,-1)))[0]]
# # # [obj1,obj2] = [deepcopy(Y) for Y in surrogate.predict(X_scaler.transform(np.reshape(Xnew_ind[_Npop+ipop_new],(1,-1))))[0]]
# """2.2.2.c-calculate cost"""
# # obj3 = objfunCost(Xnew_ind[_Npop+ipop_new])
# """2.3.2-initiate updated training dataset: Ynew_obj"""
# # if objtype==1:
# # Ynew_obj[_Npop+ipop_new] = [deepcopy(Y) for Y in [obj1]]
# # if objtype==2:
# # Ynew_obj[_Npop+ipop_new] = [deepcopy(Y) for Y in [obj2]]
# # if objtype==3:
# # Ynew_obj[_Npop+ipop_new] = [deepcopy(Y) for Y in [obj3]]
# # if objtype==12:
# # Ynew_obj[_Npop+ipop_new] = [deepcopy(Y) for Y in [obj1,obj2]]
# # if objtype==13:
# # Ynew_obj[_Npop+ipop_new] = [deepcopy(Y) for Y in [obj1,obj3]]
# # if objtype==23:
# # Ynew_obj[_Npop+ipop_new] = [deepcopy(Y) for Y in [obj2,obj3]]
# # if objtype==123:
# # Ynew_obj[_Npop+ipop_new] = [deepcopy(Y) for Y in [obj1, obj2,obj3]]
# # TODO ANGA-2.4 update fitness.values
# # ind.fitness.values = [deepcopy(Y) for Y in Ynew_obj[_Npop+ipop_new]]
# """2.4.2-print updated result training dataset: Xnew_ind, Ynew_obj"""
# # print '\tXnew_ind: [' + '\t'.join(map("{:.5f}".format, Xnew_ind[ipop])) + ']' \
# # + '\n\tMean_X: ' + str(np.mean(Xnew_ind[ipop])) \
# # + '\n\tStd_X: ' + str(np.std(Xnew_ind[ipop])) \
# # + '\n\tYnew_obj: [' + '\t'.join(map("{:.5f}".format, Ynew_obj[ipop])) + ']'
# ipop += 1
# # TODO ANGA-2.5 empty retraining pool
# TODO NSGA2-3 Pareto Front
print '--py:MOEA::\tNSGA2 Selection'
# print 'Select the next generation population\nAfter cx mut'
# for ipop in population:
# print '\tpopulation.sel.b'\
# + '\tvar: [' + ', '.join(map("{:.5f}".format, ipop.variable)) + ']'\
# + '\tobj: [' + ', '.join(map("{:.5f}".format, ipop.fitness.values)) + ']'\
# + '\tcrw: [' + str(ipop.fitness.crowding_dist) + ']'
# for ipop in offspring:
# print '\toffspring.sel.b'\
# + '\tvar: [' + ', '.join(map("{:.5f}".format, ipop.variable)) + ']'\
# + '\tobj: [' + ', '.join(map("{:.5f}".format, ipop.fitness.values)) + ']'\
# + '\tcrw: [' + str(ipop.fitness.crowding_dist) + ']'
# print
population = selNSGA2(population + offspring, _Npop)
ioResultFile.writePareto(individuals=population, igen=igen)
# for ipop in range(_Npop):
# print '\tpopulation.sel.a' \
# + '\tXold_ind: [' + ', '.join(map("{:.5f}".format, population[ipop].variable)) + ']'\
# + '\tYold_obj: [' + ', '.join(map("{:.5f}".format, population[ipop].fitness.values)) + ']'\
# + '\tcrw: [' + str(population[ipop].fitness.crowding_dist) + ']'
# for ipop in range(_Npop):
# population[ipop].objective = population[ipop].estimator(population[ipop].variable)
#
# Xold_ind.append(population[ipop].variable)
# Yold_obj.append(population[ipop].objective)
#
# print '\t' + str(ipop) \
# + '\tXold_ind: [' + ', '.join(map("{:.5f}".format, population[ipop].variable)) + ']'\
# + '\tMean_X: ' + str(np.mean(population[ipop].variable))\
# + '\tStd_X: ' + str(np.std(population[ipop].variable))\
# + '\tYold_obj: [' + ', '.join(map("{:.5f}".format, population[ipop].objective)) + ']'
#
# surrogate.fit(X_scaler.transform(Xold_ind), Yold_obj)
# # surrogate.partial_fit(X_scaler.transform(Xold_ind), Yold_obj)
# Ynew_obj = surrogate.predict(Xnew_ind)
# print 'ANNSurrogate.Xnew_ind:\n\t[' + '\t'.join(map(str, Xnew_ind)) + ']'
# print 'ANNSurrogate.Ynew_obj:\n\t[' + '\t'.join(map(str, Ynew_obj)) + ']'
# TODO NSGA2-4 end loop
ioResultFile.writeEnd()
ioResultFile.savePlot()
# ioResultFile.plot_json()
if __name__ == "__main__":
print "=================================================="
print "== =="
print "== Project: Surrogate Model =="
print "== File: run_moea.py =="
print "== =="
print "== Author: Quan Pan =="
print "== Email: quanpan302@hotmail.com =="
print "== =="
print "== License: MIT License =="
print "== Create: 2016-12-02 =="
print "== =="
print "=================================================="
# python run_sm.py -g 10 -d 100 -p 40 -o 2 -c 0 -x 0.9 2>&1
# icaseStart = 1
# icaseEnd = 2
# os.system('./run.sh '+str(icaseStart)+' '+str(icaseEnd))
main(sys.argv[1:])
| mit |
plissonf/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
alvarofierroclavero/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
COSMOGRAIL/PyCS | pycs/sim/plot.py | 1 | 70337 | """
Subpackage with functions to plot all kind of results from runs.
"""
import numpy as np
import math, sys, os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, MaxNLocator
import scipy.ndimage
import scipy.stats # needed for the gaussian_kde contours in newcovplot
import pycs.gen.util
def mad(xs):
"""
Return the median absolute deviation. Write it myself here instead of importing it from astropy, since that would add another dependency. Works with 1d arrays only.
@todo: for PyCS 3, will use astropy as a default module (good) and use their functions
:param xs: list of values
:return: median absolute deviation
"""
median = np.median(xs)
mad = np.median([np.abs(x-median) for x in xs])
return mad
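# Hedged usage sketch (added for illustration, not part of the original PyCS code):
# compares mad() against np.std on a made-up sample containing one catastrophic
# outlier. The sample values are invented purely for this example.
def _mad_demo():
    sample = np.array([1.0, 1.2, 0.9, 1.1, 50.0])
    # np.std is dominated by the outlier, while mad() stays close to the scatter
    # of the bulk of the sample.
    return (np.std(sample), mad(sample))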
class delaycontainer:
"""
Stores the delay or error bar measurement(s) (one for each curve pair).
This object is usually produced by the plot-functions ``hists`` or ``measvstrue`` below.
markers : [ 7 | 4 | 5 | 6 | 'o' | 'D' | 'h' | 'H' | '_' | '' | 'None' | ' ' | None | '8' | 'p' | ',' | '+' | '.' | 's' | '*' | 'd' | 3 | 0 | 1 | 2 | '1' | '3' | '4' | '2' | 'v' | '<' | '>' | '^' | '|' | 'x' | '$...$' | tuple | Nx2 array ]
"""
def __init__(self, data, name="No name", objects=None, plotcolour = "black", marker=None):
"""
self.data is a list of dicts; the fields depend on whether it's delays or errorbars
* delays : label, mean, med, std
* errorbars : label, tot, sys, ran, bias
"""
self.data = data
self.name = name
self.objects = objects
self.plotcolour = plotcolour
self.marker = marker
self.markersize = 5.0
self.yshift = 0.0 # allows to "group" measurements
def newdelayplot(plotlist, rplot=7.0, displaytext=True, hidedetails=False, showbias=True, showran=True, showerr=True, showlegend=True, text=None, figsize=(10, 6), left = 0.06, right=0.97, top=0.99, bottom=0.08, wspace=0.15, hspace=0.3, txtstep=0.04, majorticksstep=2, filename=None, refshifts=None, refdelays=None, legendfromrefdelays=False, hatches=None, centershifts=None, ymin=0.2, hlines=None, tweakeddisplay=False, blindness=False, horizontaldisplay=False, showxlabelhd=True):
"""
Plots delay measurements from different methods, telescopes, sub-curves, etc in one single plot.
For this I use only ``delaycontainer`` objects, i.e. I don't do any "computation" myself.
:param plotlist: Give me a list of tuples (delays, errorbars), where delays and errorbars are delaycontainer objects as written into pkl files by ``hists`` and ``measvstrue``.
NEW : plotlist delaycont can handle asymmetric errors (e.g. to seamlessly compare pycs with other papers' results). Instead of the "tot" key, new "plus" and "minus" keys are used.
:type plotlist: list
:param rplot: radius of delay axis, in days.
:param displaytext: Show labels with technique names and values of delays
:type displaytext: boolean
:param hidedetails: Do not show (ran, sys) in labels
:type hidedetails: boolean
:param refshifts: This is a list of dicts like {"colour":"gray", "shifts":(0, 0, 0, 90)}. Will be plotted as dashed vertical lines.
:type refshifts: list
:param refdelays: a list of tuples (delays, errorbars) to be plotted as shaded vertical zones.
:type refdelays: list
:param legendfromrefdelays: if you want to display the refdelays name in the legend panel
:type legendfromrefdelays: boolean
:param hatches: list of hatch keyword for the refdelays plotting
:type hatches: list
:param showbias: draws a little cross at the position of the delay "corrected" for the bias.
:type showbias: boolean
:param showran: draws "minor" error bar ticks using the random error only.
:type showran: boolean
:param text:
Text that you want to display, in the form : [line1, line2, line3 ...]
where line_i is (x, y, text, kwargs) where kwargs is e.g. {"fontsize":18} and x and y are relative positions (from 0 to 1).
:type text: list
:param blindness: Shift the measurements by their mean, so the displayed value are centered around 0
:type blindness: boolean
:param horizontaldisplay: display the delay panels on a single line. Works only for three-delay containers.
:type horizontaldisplay: boolean
:param showxlabelhd: display or not the x label when horizontal display is True
:type showxlabelhd: boolean
.. warning:: Although the code says I'm plotting mean and std for the measured delays, I might be using median and mad instead! This depends on how ``hists`` was called! Be careful with this...
"""
# Some checks :
objects = plotlist[0][0].objects
for (delays, errors) in plotlist:
if delays.objects != objects or errors.objects != objects:
raise RuntimeError("Don't ask me to overplot stuff from different objects !")
n = len(objects)
nmeas = len(plotlist)
print "Objects : %s" % (", ".join(objects))
if horizontaldisplay and n != 3:
print "Horizontal display works only for three delays, you have %i" % n
print "Switching back to regular display"
horizontaldisplay = False
for (delays, errors) in plotlist:
if delays.plotcolour != errors.plotcolour:
raise RuntimeError("Hmm, plotcolours of delays and errors don't correspond !")
print "Delays : %s <-> Errors : %s" % (delays.name, errors.name)
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
print "#" * 80
for i in range(n): # A, B, C, D and so on
for j in range(n):
# print i, j
if (i == 0) or (j == n - 1):
continue # No plot
if not horizontaldisplay:
axisNum += 1
if j >= i:
continue
if horizontaldisplay:
axisNum += 1
ax = plt.subplot(1, n, axisNum)
else:
ax = plt.subplot(n - 1, n - 1, axisNum)
# We will express the delays "i - j"
delaylabel = "%s%s" % (objects[j], objects[i])
print " Delay %s" % (delaylabel)
# General esthetics :
ax.get_yaxis().set_ticks([])
minorLocator = MultipleLocator(1.0)
majorLocator = MultipleLocator(majorticksstep)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_locator(majorLocator)
# To determine the plot range :
paneldelays = []
# Going through plotlist :
if tweakeddisplay:
labelfontsize = 18
else:
labelfontsize = 14
if blindness:
blinddelays = []
for (ipl, (delays, errors)) in enumerate(plotlist):
blinddelays.append([meas for meas in delays.data if meas["label"] == delaylabel][0]["mean"])
blindmean = np.mean(blinddelays)
for (ipl, (delays, errors)) in enumerate(plotlist):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
if blindness:
delay["mean"] -= blindmean
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
paneldelays.append(delay["mean"])
ypos = nmeas - ipl + delays.yshift
# treat two cases: symmetric error ("tot" kw) and asymmetric ("plus" and "minus" kw)
if "tot" in error: # then it is symmetric
xerr = error["tot"]
else:
xerr = np.array([[error["minus"], error["plus"]]]).T
if hasattr(delays, 'elinewidth'):
elinewidth = delays.elinewidth
else:
elinewidth = 1.5
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=xerr, fmt='-', ecolor=delays.plotcolour, elinewidth=elinewidth, capsize=3, barsabove=False)
if showran:
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["ran"], fmt='-',
ecolor=delays.plotcolour, elinewidth=0.5, capsize=2, barsabove=False)
if delays.marker == None or delays.marker == ".":
plt.plot([delay["mean"]], [ypos], marker='o', markersize=delays.markersize,
markeredgecolor=delays.plotcolour, color=delays.plotcolour)
else:
plt.plot([delay["mean"]], [ypos], marker=delays.marker, markersize=delays.markersize,
markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if showbias:
plt.plot([delay["mean"] - error["bias"]], [ypos], marker="x", markersize=delays.markersize,
markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if hidedetails or (error["ran"] < 0.001 and error["sys"] < 0.001): # Then we omit writing them.
if "tot" in error:
delaytext = r"$%+.1f \pm %.1f$" % (delay["mean"], error["tot"])
else:
delaytext = r"$%+.1f^{+%.1f}_{-%.1f}$" % (delay["mean"], error["plus"], error["minus"])
else:
if "tot" in error:
delaytext = r"$%+.1f \pm %.1f\,(%.1f, %.1f)$" % (
delay["mean"], error["tot"], error["ran"], error["sys"])
else: # no sys and random for the asymmetric guys...
delaytext = r"$%+.1f^{+%.1f}_{-%.1f}$" % (delay["mean"], error["plus"], error["minus"])
# if you want to hide the error...
if not showerr:
delaytext = r"$%+.1f$" % delay["mean"]
if n == 2: # For doubles, we include the technique name into the txt :
delaytext = r"%s : " % (delays.name) + delaytext
if displaytext:
if hasattr(delays, 'labelfontsize'):
thislabelfontsize = delays.labelfontsize
else:
thislabelfontsize = labelfontsize
ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color=delays.plotcolour,
horizontalalignment="center", fontsize=thislabelfontsize)
if "tot" in error:
print "%45s : %+6.2f +/- %.2f (%.2f, %.2f)" % (
delays.name, delay["mean"], error["tot"], error["ran"], error["sys"])
else:
print "%45s : %+6.2f + %.2f - %.2f" % (delays.name, delay["mean"], error["plus"], error["minus"])
print "#" * 80
# Now this panel is done. Some general settings :
if centershifts != None:
centerdelay = centershifts[i] - centershifts[j]
else:
centerdelay = np.median(paneldelays)
plt.xlim((centerdelay - rplot, centerdelay + rplot))
plt.ylim((ymin, nmeas + 1.5))
# Blindness display options
if blindness:
xlabel = "Blind delay [day]"
else:
xlabel = "Delay [day]"
# Tweaked display option (should disappear for an uniform display !!)
if tweakeddisplay:
plt.xticks(fontsize=15)
xlabelfontsize = 18
else:
xlabelfontsize = 14
if i == n - 1 and not horizontaldisplay:
plt.xlabel(xlabel, fontsize=xlabelfontsize)
elif horizontaldisplay:
if showxlabelhd:
plt.xlabel(xlabel, fontsize=xlabelfontsize)
else:
ax.get_xaxis().set_ticks([])
if n != 2: # otherwise only one panel, no need
plt.annotate(delaylabel, xy=(0.03, 0.88 - txtstep), xycoords='axes fraction', fontsize=14,
color="black")
if refshifts != None:
for item in refshifts:
refdelay = item["shifts"][i] - item["shifts"][j]
plt.axvline(refdelay, color=item["colour"], linestyle="--", dashes=(3, 3), zorder=-20)
if refdelays != None:
try: # if refdelays are in the form of delays and errors containers:
for (ipl,(delays, errors)) in enumerate(refdelays):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
if hatches!=None:
plt.axvspan(delay["mean"]-error["tot"], delay["mean"]+error["tot"], facecolor=delays.plotcolour, alpha=0.25, zorder=-20, edgecolor="none", linewidth=0, hatch=hatches[ipl])
else:
plt.axvspan(delay["mean"]-error["tot"], delay["mean"]+error["tot"], facecolor=delays.plotcolour, alpha=0.25, zorder=-20, edgecolor="none", linewidth=0)
plt.axvline(delay["mean"], color=delays.plotcolour, linestyle="--", dashes=(5, 5), lw=1.0, zorder=-20)
#plt.axvline(delay["mean"], color=item.plotcolour, linestyle="-", lw=2, alpha=0.5, zorder=-20)
except: # then refdelays is a list of flat delays
(delay, errors) = refdelays[axisNum-1]
plt.axvspan(delay-errors[1], delay+errors[0], facecolor="gray", alpha=0.15, zorder=-20, edgecolor="none", linewidth=0)
plt.axvline(delay, color="gray", linestyle='--', dashes=(5, 5), lw=1.0, zorder=-20, alpha=0.4)
if hlines != None:
for hline in hlines:
plt.axhline(hline, lw=0.5, color="gray", zorder=-30)
# The "legend" :
if showlegend:
for (ipl, (delays, errors)) in enumerate(plotlist):
line = "%s" % (delays.name)
if not tweakeddisplay:
plt.figtext(x=right, y=top - txtstep * ipl, s=line, verticalalignment="top",
horizontalalignment="right", color=delays.plotcolour, fontsize=14)
else:
if hasattr(delays, 'legendfontsize'):
lfontsize = delays.legendfontsize
else:
lfontsize = 16
plt.figtext(x=0.75, y=top - txtstep * ipl - 0.1, s=line, verticalalignment="top",
horizontalalignment="center", color=delays.plotcolour,
fontsize=lfontsize) # for 3-delay plots
if legendfromrefdelays:
for (ipl, (delays, errors)) in enumerate(refdelays):
line = "%s" % (delays.name)
plt.figtext(x=right, y=top - txtstep * (ipl + len(plotlist)), s=line, verticalalignment="top",
horizontalalignment="right", color=delays.plotcolour, fontsize=14)
# Generic text :
if text != None:
for line in text:
plt.figtext(x=line[0], y=line[1], s=line[2], **line[3])
if filename == None:
plt.show()
else:
plt.savefig(filename)
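# Hedged usage sketch (added for illustration, not part of the original PyCS code):
# how the (delays, errorbars) tuples expected by newdelayplot() could be built by
# hand, without going through the pkl files written by ``hists`` and ``measvstrue``.
# All delay values, error values and colours below are invented for this example.
def _newdelayplot_demo():
    objects = ["A", "B"]
    delays = delaycontainer(
        data=[{"label": "AB", "mean": -5.0, "med": -5.1, "std": 1.2}],
        name="Demo technique", objects=objects, plotcolour="blue")
    errors = delaycontainer(
        data=[{"label": "AB", "tot": 1.5, "sys": 0.8, "ran": 1.3, "bias": -0.2}],
        name="Demo technique", objects=objects, plotcolour="blue")
    # Single-pair ("double") call; with filename=None the figure pops up interactively.
    newdelayplot([(delays, errors)], rplot=10.0, filename=None)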
def newdelayplot2(plotlist, rplot=7.0, displaytext=True, hidedetails=False, showbias=True, showran=True, showlegend=True, text=None, figsize=(10, 6), left = 0.06, right=0.97, top=0.99, bottom=0.08, wspace=0.15, hspace=0.3, txtstep=0.04, majorticksstep=2, filename="screen", refshifts=None, refdelays=None, legendfromrefdelays=False, hatches=None, centershifts=None, ymin=0.2, hlines=None):
"""
Plots delay measurements from different methods, telescopes, sub-curves, etc in one single plot.
For this I use only ``delaycontainer`` objects, i.e. I don't do any "computation" myself.
The difference from newdelayplot is that the previously hatched/shaded regions are plotted as smaller points, without information on the time delay.
:param plotlist: Give me a list of tuples (delays, errorbars), where delays and errorbars are delaycontainer objects as written into pkl files by ``hists`` and ``measvstrue``.
:type plotlist: list
:param rplot: radius of delay axis, in days.
:param displaytext: Show labels with technique names and values of delays
:type displaytext: boolean
:param hidedetails: Do not show (ran, sys) in labels
:type hidedetails: boolean
:param refshifts: This is a list of dicts like {"colour":"gray", "shifts":(0, 0, 0, 90)}. Will be plotted as dashed vertical lines.
:type refshifts: list
:param refdelays: a list of tuples (delays, errorbars) to be plotted as shaded vertical zones.
:type refdelays: list
:param legendfromrefdelays: if you want to display the refdelays name in the legend panel
:type legendfromrefdelays: boolean
:param hatches: list of hatch keyword for the refdelays plotting
:type hatches: list
:param showbias: draws a little cross at the position of the delay "corrected" for the bias.
:type showbias: boolean
:param showran: draws "minor" error bar ticks using the random error only.
:type showran: boolean
:param text:
Text that you want to display, in the form : [line1, line2, line3 ...]
where line_i is (x, y, text, kwargs) where kwargs is e.g. {"fontsize":18} and x and y are relative positions (from 0 to 1).
:type text: list
"""
# Some checks :
objects = plotlist[0][0].objects
for (delays, errors) in plotlist:
if delays.objects != objects or errors.objects != objects:
raise RuntimeError("Don't ask me to overplot stuff from different objects !")
n = len(objects)
nmeas = len(plotlist)+len(refdelays)/2 +1
print "Objects : %s" % (", ".join(objects))
for (delays, errors) in plotlist:
if delays.plotcolour != errors.plotcolour:
raise RuntimeError("Hmm, plotcolours of delays and errors don't correspond !")
print "Delays : %s <-> Errors : %s" % (delays.name, errors.name)
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
print "#"*80
for i in range(n): # A, B, C, D and so on
for j in range(n):
#print i, j
if (i == 0) or (j == n-1) :
continue # No plot
axisNum += 1
if j >= i:
continue
ax = plt.subplot(n-1, n-1, axisNum)
# We will express the delays "i - j"
delaylabel="%s%s" % (objects[j], objects[i])
print " Delay %s" % (delaylabel)
# General esthetics :
ax.get_yaxis().set_ticks([])
minorLocator = MultipleLocator(1.0)
majorLocator = MultipleLocator(majorticksstep)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_locator(majorLocator)
# To determine the plot range :
paneldelays = []
# Going through plotlist :
for (ipl,(delays, errors)) in enumerate(plotlist):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
paneldelays.append(delay["mean"])
ypos = nmeas - ipl*1.3 + delays.yshift
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["tot"], fmt='-', ecolor=delays.plotcolour, elinewidth=delays.markersize/5.0*1.5, capsize=3, barsabove=False)
if showran:
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["ran"], fmt='-', ecolor=delays.plotcolour, elinewidth=0.5, capsize=2, barsabove=False)
if delays.marker == None or delays.marker == ".":
plt.plot([delay["mean"]], [ypos], marker='o', markersize=delays.markersize, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
else:
plt.plot([delay["mean"]], [ypos], marker=delays.marker, markersize=delays.markersize, markeredgecolor=delays.plotcolour, color="white")
if showbias:
plt.plot([delay["mean"] - error["bias"]], [ypos], marker="x", markersize=delays.markersize, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if hidedetails or (error["ran"] < 0.001 and error["sys"] < 0.001): # Then we omit writing them.
delaytext = r"$%+.1f \pm %.1f$" % (delay["mean"], error["tot"])
else:
delaytext = r"$%+.1f \pm %.1f\,(%.1f, %.1f)$" % (delay["mean"], error["tot"], error["ran"], error["sys"])
if n==2: # For doubles, we include the technique name into the txt :
delaytext = r"%s : " % (delays.name) + delaytext
if displaytext:
if delays.markersize>5:
ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color = delays.plotcolour, horizontalalignment="center", fontsize=16)
else:
ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color = delays.plotcolour, horizontalalignment="center", fontsize=14)
print "%45s : %+6.2f +/- %.2f (%.2f, %.2f)" % (delays.name, delay["mean"], error["tot"], error["ran"], error["sys"])
# Going through refdelays :
for (ipl,(delays, errors)) in enumerate(refdelays):
# Getting the delay for this particular panel
delay = [meas for meas in delays.data if meas["label"] == delaylabel][0]
error = [meas for meas in errors.data if meas["label"] == delaylabel][0]
paneldelays.append(delay["mean"])
if ipl in [0, 1]:
ypos = nmeas - (ipl/2.5+4.2) + 0.6
elif ipl in [2, 3]:
ypos = nmeas - (ipl/2.5+4.2) + 0.6 -0.4
elif ipl in [4, 5]:
ypos = nmeas - (ipl/2.5+4.2) + 0.6 -0.8
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["tot"], fmt='-', ecolor=delays.plotcolour, elinewidth=1.0, capsize=3, barsabove=False)
if showran:
plt.errorbar([delay["mean"]], [ypos], yerr=None, xerr=error["ran"], fmt='-', ecolor=delays.plotcolour, elinewidth=0.33, capsize=2, barsabove=False)
if delays.marker == None or delays.marker == ".":
plt.plot([delay["mean"]], [ypos], marker='o', markersize=delays.markersize/1.5, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
else:
plt.plot([delay["mean"]], [ypos], marker=delays.marker, markersize=delays.markersize/1.5, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if showbias:
plt.plot([delay["mean"] - error["bias"]], [ypos], marker="x", markersize=delays.markersize, markeredgecolor=delays.plotcolour, color=delays.plotcolour)
if hidedetails or (error["ran"] < 0.001 and error["sys"] < 0.001): # Then we omit writing them.
delaytext = r"$%+.1f \pm %.1f$" % (delay["mean"], error["tot"])
else:
delaytext = r"$%+.1f \pm %.1f\,(%.1f, %.1f)$" % (delay["mean"], error["tot"], error["ran"], error["sys"])
if n==2: # For doubles, we include the technique name into the txt :
delaytext = r"%s : " % (delays.name) + delaytext
if displaytext:
pass
#ax.annotate(delaytext, xy=(delay["mean"], ypos + 0.3), color = delays.plotcolour, horizontalalignment="center", fontsize=14)
print "%45s : %+6.2f +/- %.2f (%.2f, %.2f)" % (delays.name, delay["mean"], error["tot"], error["ran"], error["sys"])
if axisNum ==1:
"""
ax.annotate(r"0", xy=(-13.3, 4.1), color = "crimson", horizontalalignment="center", fontsize=16)
ax.annotate(r"1", xy=(-13.3, 2.9), color = "crimson", horizontalalignment="center", fontsize=16)
ax.annotate(r"2", xy=(-13.33, 1.7), color = "crimson", horizontalalignment="center", fontsize=16)
ax.annotate(r"3", xy=(-13.37, 0.5), color = "crimson", horizontalalignment="center", fontsize=16)
"""
"""
ax.annotate(r"$\diamond$", xy=(-13.3, 3.0), color = "crimson", horizontalalignment="center", fontsize=18)
ax.annotate(r"$\dag$", xy=(-13.33, 1.8), color = "crimson", horizontalalignment="center", fontsize=18)
ax.annotate(r"$\bowtie$", xy=(-13.37, 0.6), color = "crimson", horizontalalignment="center", fontsize=18)
"""
print "#"*80
# Now this panel is done. Some general settings :
if centershifts != None:
centerdelay = centershifts[i] - centershifts[j]
else:
centerdelay = np.median(paneldelays)
plt.xlim((centerdelay - rplot, centerdelay + rplot))
plt.ylim((ymin, nmeas+1.5))
if i == n-1:
plt.xlabel("Delay [day]", fontsize=14)
if n != 2: # otherwise only one panel, no need
plt.annotate(delaylabel, xy=(0.03, 0.88-txtstep), xycoords='axes fraction', fontsize=14, color="black")
if refshifts != None:
for item in refshifts:
refdelay = item["shifts"][i] - item["shifts"][j]
plt.axvline(refdelay, color=item["colour"], linestyle="--", dashes=(3, 3), zorder=-20)
if hlines != None:
for hline in hlines:
plt.axhline(hline, lw=0.5, color="gray", zorder=-30)
# The "legend" :
if showlegend:
for (ipl,(delays, errors)) in enumerate(plotlist):
line = "%s" % (delays.name)
plt.figtext(x = right, y = top - txtstep*ipl, s = line, verticalalignment="top", horizontalalignment="right", color=delays.plotcolour, fontsize=16)
"""
if legendfromrefdelays:
for (ipl,(delays, errors)) in enumerate(refdelays):
line = "%s" % (delays.name)
plt.figtext(x = right, y = top - txtstep*(ipl+len(plotlist)), s = line, verticalalignment="top", horizontalalignment="right", color=delays.plotcolour, fontsize=12)
"""
"""
plt.figtext(x = right-0.123, y = top - txtstep*len(plotlist) - 0.025, s = r"$\diamond$", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=18)
plt.figtext(x = right-0.125, y = top - txtstep*(len(plotlist)+1) - 0.023 , s = r"$\dag$", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=18)
plt.figtext(x = right-0.12, y = top - txtstep*(len(plotlist)+2) - 0.025, s = r"$\bowtie$", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=18)
plt.figtext(x = right, y = top - txtstep*len(plotlist) - 0.03, s = "- 2003-2007", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=13)
plt.figtext(x = right, y = top - txtstep*(len(plotlist)+1) - 0.03 , s = "- 2008-2012", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=13)
plt.figtext(x = right, y = top - txtstep*(len(plotlist)+2) - 0.03, s = "- 2013-2016", verticalalignment="top", horizontalalignment="right", color="crimson", fontsize=13)
"""
# Generic text :
if text != None:
for line in text:
plt.figtext(x=line[0], y=line[1], s=line[2], **line[3])
if filename=="screen":
plt.show()
else:
plt.savefig(filename)
def normal(x, mu, sigma):
"""
Plain normal distribution.
You can directly apply me on numpy arrays x, mu, sigma.
"""
return (1.0/np.sqrt(2.0*np.pi*sigma*sigma)) * np.exp( - (x - mu)**2/(2*sigma*sigma))
def hists(rrlist, r=10.0, nbins=100, showqs=True, showallqs=False, qsrange=None, title=None, xtitle=0.5, ytitle=0.95, titlesize=18, niceplot=False, displaytext=True, figsize=(16, 9), left = 0.06, right=0.95, bottom=0.065, top=0.95, wspace=0.2, hspace=0.2, txtstep=0.04, majorticksstep=2, hideyaxis=True, trueshifts=None, filename=None, dataout=False, blindness=False, usemedian=False, outdir = "./"):
"""
Comparing the delay distributions from different run result objects.
:param rrlist: a list of runresults objects.
:param r: a range radius for the hists
:param showqs: If True, I overplot the qs as scatter points.
:param dataout: True means that I'll write the pkl file needed to make the delayplot.
:param removeoutliers: True means I remove estimates that are the farthest from the median. Use this with CAUTION !!!
:param usemedian: if True, use the median and median absolute deviation instead of mean and std.
.. warning:: To avoid rewriting newdelayplot, if usemedian is True then I write the median and mad in the mean and std fields of the pickles. This is dangerous (and a bit stupid and lazy), but since hists() and newdelayplot() are usually called one after the other it should not create too much confusion.
.. note:: Actually, using median and mad as default estimators might be smarter...? To meditate for PyCS 3.0...
"""
n = rrlist[0].nimages()
labels = rrlist[0].labels
# To get some fixed ranges for the histograms, we will use the center of the histos :
#reftrueshifts = (1.0/len(rrlist)) * np.sum(np.array([rr.getts()["center"] for rr in rrlist]), axis=0)
reftrueshifts = 0.5 * (np.max(np.array([rr.getts()["center"] for rr in rrlist]), axis=0) + np.min(np.array([rr.getts()["center"] for rr in rrlist]), axis=0))
#print reftrueshifts
for rr in rrlist:
if rr.labels != labels:
raise RuntimeError("Don't ask me to overplot runresults of different curves !")
#if not np.all(rr.gettruets()["center"] == reftrueshifts):
# print "Warning : I use the trueshift of the first rr to set the ranges."
rr.trues = rr.gettruets() # To store this, avoids calculating it again and again.
rr.tmpdata = []
fig = plt.figure(figsize=figsize)
#fig.subplots_adjust(left = 0.03, right=0.95, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2)
#Looks good :
#fig.subplots_adjust(left = 0.06, right=0.95, bottom=0.065, top=0.95, wspace=0.2, hspace=0.2)
fig.subplots_adjust(left = left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
for i in range(n): # [A, B, C, D]
for j in range(n):
if (i == 0) or (j == n-1) :
continue # No plot
axisNum += 1
if j >= i:
continue
ax = plt.subplot(n-1, n-1, axisNum)
#ax = plt.gca()
# Delay label, used not only for display purposes, but also for the output pkl.
delaylabel="%s%s" % (labels[j], labels[i])
if i == n-1:
if n == 2: # Only one panel -> we write the object names into the xlabel
plt.xlabel("Delay %s%s [day]" % (labels[j], labels[i]), fontsize=14)
else:
plt.xlabel("Delay [day]", fontsize=14)
if showqs:
axscatter = ax.twinx()
# Hide the y ticks :
#ax.get_yaxis().set_ticks([])
# Ranges to plot
reftruedelay = reftrueshifts[i] - reftrueshifts[j]
histrange = (reftruedelay - r, reftruedelay + r)
for irr, rr in enumerate(rrlist):
# We will express the delays "i - j"
if rr.plottrue == True:
delays = rr.truetsarray[:,i] - rr.truetsarray[:,j]
else:
delays = rr.tsarray[:,i] - rr.tsarray[:,j]
meddelay = np.median(delays)
maddelay = mad(delays)
meandelay = np.mean(delays)
stddelay = np.std(delays)
# We save these :
if usemedian:
rr.tmpdata.append({"label":delaylabel, "mean":meddelay, "med":meddelay, "std":maddelay})
else:
rr.tmpdata.append({"label":delaylabel, "mean":meandelay, "med":meddelay, "std":stddelay})
#(counts, bins, patches) = ax.hist(delays, bins=nbins, range=histrange, histtype="step", color=colours[irr % len(colours)], normed=True)
(counts, bins, patches) = ax.hist(delays, bins=nbins, range=histrange, histtype="bar", color=rr.plotcolour, alpha = 0.4, lw=0, normed=True)
if niceplot:
majorLocator = MultipleLocator(majorticksstep)
minorLocator = MultipleLocator(1.0)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_minor_locator(minorLocator)
ax.yaxis.set_ticks([])
if showqs and not rr.plottrue :
if showallqs:
axscatter.scatter(delays, rr.qs, s=1, facecolor=rr.plotcolour, lw = 0)
else:
axscatter.scatter(delays[::5], rr.qs[::5], s=1, facecolor=rr.plotcolour, lw = 0)
#cmap = colors.LinearSegmentedColormap.from_list('custom',['white', rr.plotcolour],gamma=1.0)
#axscatter.hexbin(delays, rr.qs, gridsize=(5, 2), mincnt=1, cmap=cmap, edgecolor="none")
# extent=(histrange[0], histrange[1], -r, r)
if qsrange:
axscatter.set_ylim(qsrange)
if niceplot:
majorLocator = MultipleLocator(500)
axscatter.yaxis.set_major_locator(majorLocator)
if axisNum == 1:
axscatter.set_ylabel(r"$\chi^2$", fontsize=18)
# We plot the true shifts (if available) as a straight line individually for each rr :
if rr.trues["type"] == "same":
truedelay = rr.trues["center"][i] - rr.trues["center"][j]
plt.axvline(x=truedelay, linewidth=1, linestyle="--", color=rr.plotcolour)
# We compute and display the mean and std of the hist :
#if getattr(rr, "plotgauss", False) == True:
if displaytext == True:
if getattr(rr, "plotgauss", False) == True:
x = np.linspace(histrange[0], histrange[1], 100)
y = normal(x, meandelay, stddelay)
ax.plot(x, y, linestyle="-", color = rr.plotcolour)
if not usemedian:
delaytext = r"%+.1f $\pm$ %.1f" % (meandelay, stddelay)
else:
delaytext = r"%+.1f $\pm$ %.1f" % (meddelay, maddelay)
#print rr.name
#print delaylabel + " " + delaytext
#ax.text(meddelay, np.max(y)/2.0, "%.1f +/- %.1f" % (meddelay, stddelay), horizontalalignment = "center", color = rr.plotcolour)
ax.annotate(delaytext, xy=(0.04, 0.7 - 0.12*irr), xycoords='axes fraction', color = rr.plotcolour, fontsize=10)
plt.xlim(histrange)
# We increase the upper ylim a bit, to leave room for the annotations (more so for a single panel)
ylims = list(ax.get_ylim())
if n == 2: # single panel
ylims[1] *= 1.4
else:
ylims[1] *= 1.1
ax.set_ylim(ylims)
# hide y axis if wanted to
if hideyaxis:
ax.set_yticks([])
# make the ticks a little bit bigger than default
plt.xticks(fontsize=13)
# enforce blindness if wanted, by modifying the xticks labels (not touching the data)
if blindness:
labels = ax.xaxis.get_ticklabels()
locs = ax.xaxis.get_ticklocs()
meanloc = np.mean(locs)
blindlabels = []
for loc, label in zip(locs, labels):
blindlabels.append(str(loc-meanloc))
ax.xaxis.set_ticklabels(blindlabels)
# Looked ok on big plots :
#plt.annotate(delaylabel, xy=(0.03, 0.88), xycoords='axes fraction', fontsize=12, color="black")
if n != 2: # otherwise we have only one single panel
plt.annotate(delaylabel, xy=(0.05, 0.84), xycoords='axes fraction', fontsize=14, color="black")
if trueshifts != None:
truedelay = trueshifts[i] - trueshifts[j]
plt.axvline(truedelay, color="gray", linestyle="--", dashes=(3, 3), zorder=-20)
if dataout:
for rr in rrlist:
dc = delaycontainer(data = rr.tmpdata, name = rr.name, plotcolour = rr.plotcolour, objects=labels[:])
pycs.gen.util.writepickle(dc, outdir+ "%s_delays.pkl" % (rr.autoname))
rr.tmpdata = None
labelspacetop = 0.0
labelspaceright = 0.0
if n == 2:
labelspacetop = 0.04
labelspaceright = 0.04
for irr, rr in enumerate(rrlist):
if niceplot:
labeltxt = "%s" % (getattr(rr, 'name', 'NoName'))
plt.figtext(x = right - labelspaceright, y = top - labelspacetop - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour, fontsize=15)
else:
labeltxt = "%s (%s, %i) " % (getattr(rr, 'name', 'NoName'), "Truth" if rr.plottrue else "Measured", rr.tsarray.shape[0])
plt.figtext(x = right - labelspaceright, y = top - labelspacetop - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour, fontsize=15)
print 'Plotting "%s"' % labeltxt
print " Labels : %s" % (", ".join(rr.labels))
print " Median shifts : %s" % (", ".join(["%.2f" % (np.median(rr.tsarray[:,i])) for i in range(len(rr.labels))]))
print " Std shifts : %s" % (", ".join(["%.2f" % (np.std(rr.tsarray[:,i])) for i in range(len(rr.labels))]))
if title != None:
plt.figtext(x = xtitle, y = ytitle, s = title, horizontalalignment="center", color="black", fontsize=titlesize)
if filename == None:
plt.show()
else:
plt.savefig(filename)
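# Hedged sketch (added for illustration, not part of the original PyCS code) of the
# dataout round-trip: hists(..., dataout=True) writes "<autoname>_delays.pkl" files
# (see above), which can then be fed to newdelayplot() together with errorbar
# containers, assumed here to come from measvstrue(..., dataout=True). The paths in
# errorbarpkls and pycs.gen.util.readpickle() (assumed counterpart of the
# writepickle() call used above) are assumptions, not guaranteed by this module.
def _dataout_demo(rrlist, errorbarpkls, outdir="./"):
    # rrlist : list of runresults objects
    # errorbarpkls : list of paths to the corresponding errorbar delaycontainer pickles
    hists(rrlist, r=10.0, dataout=True, filename="fig_hists.png", outdir=outdir)
    plotlist = []
    for (rr, errpkl) in zip(rrlist, errorbarpkls):
        delays = pycs.gen.util.readpickle(outdir + "%s_delays.pkl" % (rr.autoname))
        errors = pycs.gen.util.readpickle(errpkl)
        plotlist.append((delays, errors))
    newdelayplot(plotlist, rplot=7.0, filename="fig_delays.png")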
def newcovplot(rrlist, r=6, rerr=3, nbins = 10, nbins2d=3, binclip=True, binclipr=10.0, figsize=(13, 13), left=0.06, right=0.97, top=0.97, bottom=0.04, wspace=0.3, hspace=0.3, method='indepbin', minsamples=10, showplots=True, printdetails=True, printcovmat=True, detailplots=False, filepath=None, verbose=True):
#TODO: there is no binclip in depbin ! Should I implement it ?
assert (method in ['depbin', 'indepbin'])
retdict = {} # we put all the intermediate products in a dict that we return
nimages = rrlist[0].nimages()
imginds = np.arange(nimages)
labels = rrlist[0].labels
if nimages == 4: # then it's a quad
covmatsize = 6
elif nimages == 3: # then it's a folded quad
covmatsize = 3
else: # then it's a double
print "This function does not work for doubles"
print "I kindly remind you that the covariance between a variable and itself is called variance, and there are simpler functions to compute that in PyCS. Try newdelayplot for instance."
return None # covmatsize is undefined for doubles, so we stop here instead of crashing below
couplelist = [(i, j) for j in imginds for i in imginds if i > j]
ncouples = len(couplelist)
# print couplelist
tderrsdicts = []
# rrlist is just a list of rr, we treat them one after the other
for rr in rrlist:
# for each rr, we compute the error from the true delay
truetsslist = rr.truetsarray
tsslist = rr.tsarray-truetsslist
for ind, tss in enumerate(tsslist):
tderrs = []
truetds = []
for (i, j) in couplelist:
tderrs.append(tss[i]-tss[j])
truetds.append(truetsslist[ind][i]-truetsslist[ind][j])
tderrsdicts.append({"tderrs": tderrs, "truetds": truetds})
#tderrsdict contains the errors on the true delays, as well as the true delays for each simulation
# figure 1: general covariance plot for each pair of delays. Diagonal elements are the same as in newdelayplot, off-diagonal elements show the covariance for all the runresults
allcovplot = plt.figure(figsize=figsize)
allcovplot.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
# figure 2: covariance computed in each bin, for each pair of delays. Diagonal elements are the same as in newdelayplot, off-diagonal elements are colored tiles of covariance per true-delay bin, with points overplotted.
bincovplot = plt.figure(figsize=figsize)
bincovplot.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
# create the empty covariance matrix
covmat = []
for ind in range(len(couplelist)):
covmat.append(np.zeros(len(couplelist)))
indepbins = np.zeros(len(couplelist))
depbins = np.zeros(len(couplelist))
rranges = np.zeros(len(couplelist))
retdict["delay"] = {} # dict in a dict !
for ii, i in enumerate(couplelist): # (0, 1), (0, 2) ...
delaylabel="%s%s" % (labels[i[1]], labels[i[0]])
retdict["delay"]["%s" % delaylabel] = {} # dict in a dict in a dict ! dictception !!
xtderrs = [tderrsdict["tderrs"][ii] for tderrsdict in tderrsdicts]
xtruetds = [tderrsdict["truetds"][ii] for tderrsdict in tderrsdicts]
maxx = np.max(xtruetds)
minx = np.min(xtruetds)
### fill the diagonal element
ax1 = allcovplot.add_subplot(ncouples, ncouples, covmatsize*ii + (ii+1))
ax2 = bincovplot.add_subplot(ncouples, ncouples, covmatsize*ii + (ii+1))
majorLocator = MultipleLocator(1.0)
for ax in [ax1, ax2]:
ax.yaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_locator(MaxNLocator(5))
if ii == len(couplelist)-1:
ax1.set_xlabel('True Delay [day]')
ax2.set_ylabel('Measurement error [day]', labelpad=-10)
# way 1 - binning independent of the xtruedelays distribution. The user chooses the plot range. Similar to newdelayplot()
reftrueshifts = np.mean([rr.gettruets()["center"] for rr in rrlist], axis=0)
#reftrueshifts = np.round(rrlist[0].gettruets()["center"])
reftruedelay = reftrueshifts[i[0]] - reftrueshifts[i[1]]
plotrange = (reftruedelay - r, reftruedelay + r)
binlims = np.linspace(plotrange[0], plotrange[1], nbins + 1)
# If we want to compare to newdelayplot():
# xtruetds = truedelays
# xtderrs = resis
# needed for binvals:
xtderrs = np.array(xtderrs)
digitized = np.digitize(xtruetds, binlims)
binvals = [xtderrs[digitized == bini] for bini in range(1, len(binlims))]
binstds = map(np.std, binvals)
binmeans = map(np.mean, binvals)
if binclip:
for (bini, binvalarray) in enumerate(binvals):
keep = np.logical_and(binvalarray < binclipr, binvalarray > -binclipr)
if np.sum(keep == False) != 0:
print "Kicking %i points." % (np.sum(keep == False))
binvals[bini] = binvalarray[keep]
binstds = map(np.std, binvals)
binmeans = map(np.mean, binvals)
syserror = np.max(np.fabs(binmeans))
randerror = np.max(binstds)
toterror = np.sqrt(syserror*syserror + randerror*randerror)
indepbins[ii] = toterror
retdict["delay"]["%s" % delaylabel]["indep"] = {} # dict in a dict in a dict in a dict ! we need to go deeper !!!
retdict["delay"]["%s" % delaylabel]["indep"]["syserror"] = syserror
retdict["delay"]["%s" % delaylabel]["indep"]["randerror"] = randerror
retdict["delay"]["%s" % delaylabel]["indep"]["toterror"] = toterror # that's already in the covariance matrix...
# Plot the result !
line = np.linspace(plotrange[0], plotrange[1], 100)
zeros = np.zeros(100)
width = binlims[1] - binlims[0]
for ax in [ax1, ax2]:
ax.plot(line, zeros, color="black", lw=0.5)
ax.bar(binlims[:-1], binmeans, yerr=binstds, width=width, color=rr.plotcolour, ecolor=rr.plotcolour, error_kw={"capsize":2.5, "capthick":0.5, "markeredgewidth":0.5}, edgecolor=rr.plotcolour, alpha = 0.2)
ax.set_ylim((-rerr, rerr))
if figsize[0] > 8:
ax.annotate(delaylabel, xy=(0.9, 0.05), xycoords='axes fraction', ha="center") # x axis
else:
ax.annotate(delaylabel, xy=(0.78, 0.08), xycoords='axes fraction', ha="center")
ax.set_xlim(plotrange)
majorLocator = MultipleLocator(int(r/2.0)+1)
ax.xaxis.set_major_locator(majorLocator)
ax.set_title(r'sys=%.2f | ran=%.2f' % (syserror, randerror)+'\n'+'tot=%.2f' % toterror, fontsize=10)
# way 2 - binning dependent on the xtruedelays samples: min and max vals correspond to the extrema of the xtruedelays distribution
xbinvals = np.linspace(minx, maxx, num=nbins+1, endpoint=True)
rranges[ii] = maxx-minx
binmeans = []
binstds = []
for indx, xbinval in enumerate(xbinvals[:nbins]):
subsamples = []
for (ind, xtruetd) in enumerate(xtruetds):
if xtruetd > xbinval and xtruetd < xbinvals[indx+1]:
subsamples.append(xtderrs[ind])
binmeans.append(np.mean(subsamples))
binstds.append(np.std(subsamples))
syserror = np.max(np.fabs(binmeans))
randerror = np.max(binstds)
toterror = np.sqrt(syserror*syserror + randerror*randerror)
depbins[ii] = toterror
retdict["delay"]["%s" % delaylabel]["dep"] = {}
retdict["delay"]["%s" % delaylabel]["dep"]["syserror"] = syserror
retdict["delay"]["%s" % delaylabel]["dep"]["randerror"] = randerror
retdict["delay"]["%s" % delaylabel]["dep"]["toterror"] = toterror
# We let the user choose which method he prefers
# Dear user, be CAREFUL with your choice !
if method == 'depbin':
if ii == 0 and verbose : print "You chose a binning depending on the sample values"
covmat[ii][ii] = depbins[ii]
elif method == 'indepbin': # that should be the default value
if ii == 0 and verbose : print "You chose a binning independent of the sample values"
covmat[ii][ii] = indepbins[ii]
### fill the off-diagonal elements
retdict["cov"] = {}
for jj, j in enumerate(couplelist):
axisNum += 1
if (ii == 0) or (jj == ncouples-1) :
continue # No plot
if jj >= ii:
continue
xdelaylabel="%s%s" % (labels[i[1]], labels[i[0]])
ydelaylabel="%s%s" % (labels[j[1]], labels[j[0]])
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)] = {}
if detailplots:
# figure 3: for each pair, plot the covariance in each bin. One figure per pair
bincovplot2 = plt.figure(figsize=figsize)
bincovplot2.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
ytderrs = [tderrsdict["tderrs"][jj] for tderrsdict in tderrsdicts]
ytruetds = [tderrsdict["truetds"][jj] for tderrsdict in tderrsdicts]
ax1 = allcovplot.add_subplot(ncouples, ncouples, axisNum)
ax2 = bincovplot.add_subplot(ncouples, ncouples, axisNum)
majorLocator = MultipleLocator(2.0)
ax1.set_xlim(-rerr, rerr)
ax1.set_ylim(-rerr, rerr)
ax1.xaxis.set_major_locator(majorLocator)
ax1.yaxis.set_major_locator(majorLocator)
ax1.axhline(0, color="black")
ax1.axvline(0, color="black")
ax2.xaxis.set_major_locator(MaxNLocator(3))
ax2.yaxis.set_major_locator(MaxNLocator(3))
if axisNum == ncouples*(ncouples-1) + 1:
ax1.set_xlabel('Measurement error [day]')
ax1.set_ylabel('Measurement error [day]')
ax2.set_xlabel('True delay [day]')
ax2.set_ylabel('True delay [day]')
## binning independent of the xtruedelays and ytruedelays distributions. Same plotrange as diagonal elements, but 2d binning
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["indep"] = {}
xbinlims2d = np.linspace(plotrange[0], plotrange[1], nbins2d + 1)
yreftruedelay = reftrueshifts[j[0]] - reftrueshifts[j[1]]
yplotrange = (yreftruedelay - r, yreftruedelay + r)
ybinlims2d = np.linspace(yplotrange[0], yplotrange[1], nbins2d + 1)
xcoordsan=[]
ycoordsan=[]
colorsan=[]
covsindep=[]
for indx, xbinlim in enumerate(xbinlims2d[:nbins2d]):
for indy, ybinlim in enumerate(ybinlims2d[:nbins2d]):
subsamples = []
for (ind, xtruetd), ytruetd in zip(enumerate(xtruetds), ytruetds):
if xtruetd > xbinlim and xtruetd < xbinlims2d[indx+1] and ytruetd > ybinlim and ytruetd < ybinlims2d[indy+1]:
subsamples.append((xtderrs[ind], ytderrs[ind]))
if len(subsamples) > minsamples:
covval = np.cov(subsamples, rowvar=False)[0][1]
colorsan.append("black")
else:
covval = 0
colorsan.append('crimson')
# save the plotting coords, to plot the biggest covval in bold later...
xcoordsan.append(xbinlim + (xbinlims2d[indx+1]-xbinlim)/2)
ycoordsan.append(ybinlim + (ybinlims2d[indy+1]-ybinlim)/2)
# colorize the regions according to the covariance value
maxval=0.5
alpha = min(np.abs(covval/maxval), 1.0)
from matplotlib.patches import Rectangle
rect = Rectangle((xbinlim, ybinlim), xbinlims2d[indx+1]-xbinlim, ybinlims2d[indy+1]-ybinlim, color=rrlist[0].plotcolour, alpha=alpha)
ax2.add_patch(rect)
xdelaylabeldet="%s%s [%.1f , %.1f]" % (labels[i[1]], labels[i[0]], xbinlim, xbinlims2d[indx+1])
ydelaylabeldet="%s%s [%.1f , %.1f]" % (labels[j[1]], labels[j[0]], ybinlim, ybinlims2d[indy+1])
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["indep"]["%s-%s" % (ydelaylabeldet, xdelaylabeldet)] = covval
covsindep.append(covval)
if detailplots:
# add an Axes on the figure for each bin, and plot the errors
# mapping the matplotlib indices is a bit tricky:
# if we use nbins2dx and nbins2dy: nbins2dx*nbins2dy - (nbins2dx-1-indx) - (nbins2dy*indy)
spind = nbins2d*nbins2d - (nbins2d-1-indx) - (nbins2d*indy)
ax3 = bincovplot2.add_subplot(nbins2d, nbins2d, spind)
ax3.set_xlim(-rerr, rerr)
ax3.set_ylim(-rerr, rerr)
ax3.xaxis.set_major_locator(majorLocator)
ax3.yaxis.set_major_locator(majorLocator)
ax3.axhline(0, color="black")
ax3.axvline(0, color="black")
ax3.set_xlabel('Measurement error [day]')
ax3.set_ylabel('Measurement error [day]')
showdensity = True
bins = 10
if showdensity:
cmap = colors.LinearSegmentedColormap.from_list('custom', ['white', rrlist[0].plotcolour],gamma=1.0)
ax3.hexbin([s[0] for s in subsamples], [s[1] for s in subsamples], gridsize=bins, extent=(-rerr, rerr, -rerr, rerr), mincnt=1, cmap=cmap, edgecolor="none")
showpoints=True
if showpoints:
ax3.scatter([s[0] for s in subsamples], [s[1] for s in subsamples], s=5, facecolor=rrlist[0].plotcolour, lw=0, alpha=0.5)
showcontour=True
if showcontour:
H, xedges, yedges = np.histogram2d([s[0] for s in subsamples], [s[1] for s in subsamples], range=[[-r, r], [-r, r]], bins=(bins, bins))
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
data = np.vstack((xtderrs, ytderrs))
kde = scipy.stats.kde.gaussian_kde(data)
grid = np.mgrid[-r:r:1j*bins, -r:r:1j*bins]
grid_coords = np.append(grid[0].reshape(-1,1),grid[1].reshape(-1,1),axis=1)
z = kde(grid_coords.T)
z = z.reshape(bins,bins)
levels = [np.max(z)*0.45]
cset = ax3.contour(grid[0], grid[1], z, levels=levels, origin="lower", colors=rrlist[0].plotcolour, extent=extent, linewidth=0.5)
if figsize[0] > 8:
ax3.annotate(xdelaylabeldet, xy=(0.77, 0.05), xycoords='axes fraction', ha="center")
ax3.annotate(ydelaylabeldet, xy=(0.04, 0.90), xycoords='axes fraction', ha="left", rotation=90.0)
if detailplots and filepath != None:
bincovplot2.savefig(os.path.join(filepath, "bincov_%s%s-vs-%s%s.png" % (labels[j[1]], labels[j[0]], labels[i[1]], labels[i[0]])))
mincovindep = np.min(covsindep)
maxcovindep = np.max(covsindep)
if abs(mincovindep) > maxcovindep:
extcovindep = mincovindep
else:
extcovindep = maxcovindep
mind = covsindep.index(extcovindep)
for ind, val in enumerate(covsindep):
if ind == mind:
ax2.annotate("%.2f" % val, xy=(xcoordsan[ind], ycoordsan[ind]), ha="center", va='center', color='darkblue', fontsize=14)
else:
ax2.annotate("%.2f" % val, xy=(xcoordsan[ind], ycoordsan[ind]), ha="center", va='center', color=colorsan[ind])
#plotting ax2 uses the 2d binning
for ind, xbinlim in enumerate(xbinlims2d):
ax2.axvline(xbinlim, linestyle='--', color='black', alpha=0.5)
ax2.axhline(ybinlims2d[ind], linestyle='--', color='black', alpha=0.5)
showpoints=False
if showpoints:
ax2.scatter(xtruetds, ytruetds, s=2, facecolor=rrlist[0].plotcolour, lw=0, alpha=0.1)
ax2.set_xlim(plotrange)
ax2.set_ylim(yplotrange)
# plotting ax1 is pretty basic, that's only the points
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["dep"] = {}
showdensity = True
bins = 10
if showdensity:
cmap = colors.LinearSegmentedColormap.from_list('custom', ['white', rrlist[0].plotcolour],gamma=1.0)
ax1.hexbin(xtderrs, ytderrs, gridsize=bins, extent=(-rerr, rerr, -rerr, rerr), mincnt=1, cmap=cmap, edgecolor="none")
showpoints=False
if showpoints:
ax1.scatter(xtderrs, ytderrs, s=2, facecolor=rrlist[0].plotcolour, lw=0)
showcontour=True
if showcontour:
H, xedges, yedges = np.histogram2d(xtderrs, ytderrs, range=[[-r, r], [-r, r]], bins=(bins, bins))
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
data = np.vstack((xtderrs, ytderrs))
kde = scipy.stats.kde.gaussian_kde(data)
grid = np.mgrid[-r:r:1j*bins, -r:r:1j*bins]
grid_coords = np.append(grid[0].reshape(-1,1),grid[1].reshape(-1,1),axis=1)
z = kde(grid_coords.T)
z = z.reshape(bins,bins)
levels = [np.max(z)*0.45]
cset = ax1.contour(grid[0], grid[1], z, levels=levels, origin="lower", colors=rrlist[0].plotcolour, extent=extent, linewidth=0.5)
if figsize[0] > 8:
ax1.annotate(xdelaylabel, xy=(0.9, 0.05), xycoords='axes fraction', ha="center") # x axis
ax1.annotate(ydelaylabel, xy=(0.06, 0.85), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
else:
ax1.annotate(xdelaylabel, xy=(0.78, 0.08), xycoords='axes fraction', ha="center") # x axis
ax1.annotate(ydelaylabel, xy=(0.08, 0.76), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
meancov = np.cov([(xtderr, ytderr) for xtderr, ytderr in zip(xtderrs, ytderrs)], rowvar=False)[0][1]
ax2.set_title('%s vs %s | mean = %.2f' % (ydelaylabel, xdelaylabel, meancov), fontsize=10)
## binning dependent on true delays, for comparison
xbinvals = np.linspace(minx, maxx, num=nbins2d+1, endpoint=True)
maxy = np.max(ytruetds)
miny = np.min(ytruetds)
ybinvals = np.linspace(miny, maxy, num=nbins2d+1, endpoint=True)
covsdep=[]
for indx, xbinval in enumerate(xbinvals[:nbins2d]):
for indy, ybinval in enumerate(ybinvals[:nbins2d]):
subsamples = []
for (ind, xtruetd), ytruetd in zip(enumerate(xtruetds), ytruetds):
if xtruetd > xbinval and xtruetd < xbinvals[indx+1] and ytruetd > ybinval and ytruetd < ybinvals[indy+1]:
subsamples.append((xtderrs[ind], ytderrs[ind]))
#TODO: due to the non-uniform sampling of the simulated true tds, some regions of the truetd_x vs truetd_y are rather empty (less than 10 samples). Should we i) increase the number of simulated samples, ii) discard these regions from the analysis, iii) transfer these samples to the nearest bin ?
#print len(subsamples), len(subsamples[0]), subsamples[0]
xdelaylabeldet = "%s%s [%.1f , %.1f]" % (labels[i[1]], labels[i[0]], xbinval, xbinvals[indx+1])
ydelaylabeldet = "%s%s [%.1f , %.1f]" % (labels[j[1]], labels[j[0]], ybinval, ybinvals[indy+1])
if len(subsamples) > minsamples:
covvaldep = np.cov(subsamples, rowvar=False)[0][1]
else:
covvaldep = 0.0
retdict["cov"]["%s-%s" % (ydelaylabel, xdelaylabel)]["dep"]["%s-%s" % (ydelaylabeldet, xdelaylabeldet)] = covvaldep
covsdep.append(covvaldep)
mincovdep = np.min(covsdep)
maxcovdep = np.max(covsdep)
if abs(mincovdep) > maxcovdep:
extcovdep = mincovdep
else:
extcovdep = maxcovdep
# We do NOT want the min or max in the final covmat but the mean on all samples.
# do NOT take the mean of covsdep, some samples are not in !!
covdep = meancov
covindep = meancov
if method == "depbin":
covmat[ii][jj] = covdep
covmat[jj][ii] = covdep
elif method == "indepbin":
covmat[ii][jj] = covindep
covmat[jj][ii] = covindep
if verbose:
# I should definitely improve that display part...
print "-"*15
print i, j
print covdep, covindep
axinv = bincovplot.add_subplot(ncouples, ncouples, 2, frameon=False)
axinv.set_xticklabels([])
axinv.set_yticklabels([])
axinv.set_xticks([])
axinv.set_yticks([])
# and annotate
text = 'True delay plot range: +- %i [days]' % r + '\n\n'
text += 'Measurement error plot range: +- %.1f [days]' % rerr + '\n\n'
text += '1D binning: %i bins' % nbins + '\n\n'
text += '2D binning: %ix%i bins' % (nbins2d, nbins2d) + '\n\n'
text += 'Min. number of samples in 2D binning: %i samples' % minsamples + '\n\n\n\n'
if printdetails:
if len(covmat[0]) == 6:
mylist = [str(e) for e in covmat[0]]+\
[str(e) for e in covmat[1]]+\
[str(e) for e in covmat[2]]+\
[str(e) for e in covmat[3]]+\
[str(e) for e in covmat[4]]+\
[str(e) for e in covmat[5]]
mylist = [float(e) for e in mylist]
else:
print "Cov. matrix display not defined for matrices other than 6x6 !"
printcovmat = False
if printcovmat:
text += ' AB AC AD BC BD CD \n'
text += ' '+'-----'*12+'\n'
text += 'AB | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n'\
'AC | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'AD | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'BC | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'BD | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
'CD | %.2f %.2f %.2f %.2f %.2f %.2f \n |\n' \
% (mylist[0], mylist[1], mylist[2], mylist[3], mylist[4], mylist[5]
, mylist[6], mylist[7], mylist[8], mylist[9], mylist[10], mylist[11]
, mylist[12], mylist[13], mylist[14], mylist[15], mylist[16], mylist[17]
, mylist[18], mylist[19], mylist[20], mylist[21], mylist[22], mylist[23]
, mylist[24], mylist[25], mylist[26], mylist[27], mylist[28], mylist[29]
, mylist[30], mylist[31], mylist[32], mylist[33], mylist[34], mylist[35])
axinv.annotate(text, xy=(0.7 * (ncouples-1), -2.0), xycoords='axes fraction', ha="left")
else:
axinv.annotate(text, xy=(0.7 * (ncouples-1), -1.0), xycoords='axes fraction', ha="left")
retdict["r"] = r
retdict["rerr"] = rerr
retdict["nbins"] = nbins
retdict["nbins2d"] = nbins2d
retdict["minsamples"] = minsamples
if filepath != None:
bincovplot.savefig(os.path.join(filepath, "bincov.png"))
allcovplot.savefig(os.path.join(filepath, "allcov.png"))
else:
if showplots:
plt.show()
# now let's compare indepbins and depbins
if verbose:
print "-"*35
print "nbins = %i" % nbins
print "indepbins - r = %.1f" % r
print "depbins - r(max-min) =", np.mean(rranges)
print "-"*35
print "pair - indepbins - depbins - diff"
print "-"*35
print "AB - %.2f - %.2f - %.1f%%" % (indepbins[0], depbins[0], (max(indepbins[0], depbins[0])-min(indepbins[0], depbins[0])) / max(indepbins[0], depbins[0])*100)
print "AC - %.2f - %.2f - %.1f%%" % (indepbins[1], depbins[1], (max(indepbins[1], depbins[1])-min(indepbins[1], depbins[1])) / max(indepbins[1], depbins[1])*100)
if nimages == 4:
print "BC - %.2f - %.2f - %.1f%%" % (indepbins[3], depbins[3], (max(indepbins[3], depbins[3])-min(indepbins[3], depbins[3])) / max(indepbins[3], depbins[3])*100)
print "AD - %.2f - %.2f - %.1f%%" % (indepbins[2], depbins[2], (max(indepbins[2], depbins[2])-min(indepbins[2], depbins[2])) / max(indepbins[2], depbins[2])*100)
print "BD - %.2f - %.2f - %.1f%%" % (indepbins[4], depbins[4], (max(indepbins[4], depbins[4])-min(indepbins[4], depbins[4])) / max(indepbins[4], depbins[4])*100)
print "CD - %.2f - %.2f - %.1f%%" % (indepbins[5], depbins[5], (max(indepbins[5], depbins[5])-min(indepbins[5], depbins[5])) / max(indepbins[5], depbins[5])*100)
elif nimages == 3:
print "BC - %.2f - %.2f - %.1f%%" % (indepbins[2], depbins[2], (max(indepbins[2], depbins[2])-min(indepbins[2], depbins[2])) / max(indepbins[2], depbins[2])*100)
print "-"*35
retdict["covmat"] = covmat
return retdict
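# Hedged illustration (added for this document, not part of the original PyCS code)
# of the systematic/random error combination used in newcovplot() above and in
# measvstrue() below: within each bin of true delay, the bin mean is taken as a
# systematic offset and the bin scatter as a random error, and the worst bins are
# combined in quadrature. Inputs are plain 1D numpy arrays; every bin is assumed
# to contain at least one sample.
def _binerror_sketch(truedelays, resis, binlims):
    digitized = np.digitize(truedelays, binlims)
    binvals = [resis[digitized == bini] for bini in range(1, len(binlims))]
    binmeans = [np.mean(b) for b in binvals]
    binstds = [np.std(b) for b in binvals]
    syserror = np.max(np.fabs(binmeans))
    randerror = np.max(binstds)
    toterror = np.sqrt(syserror * syserror + randerror * randerror)
    return {"sys": syserror, "ran": randerror, "tot": toterror}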
def measvstrue(rrlist, r=10.0, nbins = 10, plotpoints=True, alphapoints=1.0, plotrods=True, alpharods=0.2, ploterrorbars=True, sidebyside=True, errorrange=None, binclip=False, binclipr=10.0, title=None, xtitle=0.75, ytitle=0.95, titlesize=30, figsize=(10, 6), left = 0.06, right=0.97, top=0.99, bottom=0.08, wspace=0.15, hspace=0.3, txtstep=0.04, majorticksstep=2, displayn=True, filename=None, dataout=False, tweakeddisplay=False, blindness=False, outdir = "./"):
"""
Plots measured delays versus true delays
:param r: radius of simulation input delays to plot (x axis range)
:param nbins: number of bins for the bar plot within this range.
:param plotpoints: should I plot the points (scatter plot) ?
:param plotrods: should I plot the avg within each bin ?
:param ploterrorbars: should I add errorbars upon the bar plot ?
:param sidebyside: should I plot bars side by side, or overplot them ?
:param errorrange: radius of measurement errors to plot (y axis range). You can also give a tuple (low, high), to make asymmetric plots.
:param binclip: should I clip errors larger than binclipr days (catastrophic failures of methods) ?
:param binclipr: see binclip ...
"""
n = rrlist[0].nimages()
labels = rrlist[0].labels
# To get some fixed ranges for the histograms, we will use the first element of rrlist.
reftrueshifts = np.round(rrlist[0].gettruets()["center"])
#@todo: WAAARNING ! Depending on the shape of your rrlist (is it a 1x1000 runresults or 50x20 runresults), reftrueshift will have different values, impacting the final determination of the systematic and random error you compute. This can lead to a variation >10% on the final error !!!! DO SOMETHING !!!
#print len(rrlist), rrlist[0].gettruets()["center"]
#sys.exit()
for rr in rrlist:
if rr.labels != labels:
raise RuntimeError("Don't ask me to overplot runresults of different curves !")
#if not np.all(rr.gettruets()["center"] == reftrueshifts):
# print "Warning : I use the trueshift of the first rr to set the ranges."
rr.trues = rr.gettruets() # To store this, avoids calculating it again and again.
rr.tmpdata = []
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
for i in range(n): # [A, B, C, D]
for j in range(n):
#print i, j
if (i == 0) or (j == n-1) :
continue # No plot
axisNum += 1
if j >= i:
continue
ax = plt.subplot(n-1, n-1, axisNum)
minorLocator = MultipleLocator(1.0)
majorLocator = MultipleLocator(majorticksstep)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_locator(majorLocator)
if tweakeddisplay:
from matplotlib.ticker import MaxNLocator
locator=MaxNLocator(prune='both', nbins=6)
ax.yaxis.set_major_locator(locator)
ax.yaxis.set_minor_locator(MultipleLocator(1.0))
reftruedelay = reftrueshifts[i] - reftrueshifts[j]
plotrange = (reftruedelay - r, reftruedelay + r)
# Identity line :
line = np.linspace(plotrange[0], plotrange[1], 100)
zeros = np.zeros(100)
plt.plot(line, zeros, color="black", lw=0.5)
# Delay label
delaylabel="%s%s" % (labels[j], labels[i])
# Preparing the bins :
binlims = np.linspace(plotrange[0], plotrange[1], nbins + 1)
for irr, rr in enumerate(rrlist): # We go through the different runresult objects
# We will express the delays "i - j"
truedelays = rr.truetsarray[:,i] - rr.truetsarray[:,j]
measdelays = rr.tsarray[:,i] - rr.tsarray[:,j]
resis = measdelays-truedelays
# A simple scatter plot of the residues :
if plotpoints:
ax.scatter(truedelays, resis, s=2, facecolor=rr.plotcolour, lw = 0, alpha=alphapoints)
# We bin those :
digitized = np.digitize(truedelays, binlims)
binvals = [resis[digitized == bini] for bini in range(1, len(binlims))]
binstds = map(np.std, binvals)
binmedians = map(np.median, binvals)
binmeans = map(np.mean, binvals)
if binclip:
for (bini, binvalarray) in enumerate(binvals):
#keep = np.logical_and(binvalarray < (binmedians[bini] + 1*binstds[bini]), binvalarray > (binmedians[bini] - 1*binstds[bini]))
#keep = np.logical_and(binvalarray < np.max(binvalarray), binvalarray > np.min(binvalarray))
keep = np.logical_and(binvalarray < binclipr, binvalarray > -binclipr)
if np.sum(keep == False) != 0:
print "Kicking %i points." % (np.sum(keep == False))
binvals[bini] = binvalarray[keep]
binstds = map(np.std, binvals)
binmedians = map(np.median, binvals)
binmeans = map(np.mean, binvals)
# We save the maximum sys and ran error :
syserror = np.max(np.fabs(binmeans))
randerror = np.max(binstds)
toterror = np.sqrt(syserror*syserror + randerror*randerror)
bias = np.mean(binmeans) # The signed bias
rr.tmpdata.append({
"label":delaylabel,
"sys":syserror,
"ran":randerror,
"tot":toterror,
"bias":bias
})
#binmeans = [np.median(resis[digitized == bini]) for bini in range(1, len(binlims))]
#binstds = [np.std(resis[digitized == bini]) for bini in range(1, len(binlims))]
width = binlims[1] - binlims[0]
if plotrods:
if not sidebyside:
if ploterrorbars:
ax.bar(binlims[:-1], binmeans, yerr=binstds, width=width, color=rr.plotcolour, ecolor=rr.plotcolour, error_kw={"capsize":2.5, "capthick":0.5, "markeredgewidth":0.5}, edgecolor=rr.plotcolour, alpha = alpharods)
else:
ax.bar(binlims[:-1], binmeans, width=width, color=rr.plotcolour, edgecolor=rr.plotcolour, alpha = alpharods)
else:
width = width/len(rrlist)
squeezefactor = 1.0
plotwidth = squeezefactor * width
offset = width * (1.0-squeezefactor)/2.0
if ploterrorbars:
ax.bar(binlims[:-1] + offset + irr*plotwidth, binmeans, yerr=binstds, width=plotwidth, color=rr.plotcolour, ecolor=rr.plotcolour, error_kw={"capsize":2.5, "capthick":0.5, "markeredgewidth":0.5}, edgecolor=rr.plotcolour, alpha = alpharods, linewidth=0)
else:
ax.bar(binlims[:-1] + offset + irr*plotwidth, binmeans, width=plotwidth, color=rr.plotcolour, edgecolor=rr.plotcolour, alpha = alpharods)
# That's it for the different runresult objects, back to the common stuff for this particular panel :
if sidebyside:
for binlim in binlims:
plt.axvline(binlim, lw=0.5, color="#AAAAAA", zorder=-30)
# on all border plots :
#if i == n-1:
# plt.xlabel("Synthetic input delay [day]")
#if j == 0:
# plt.ylabel("Delay measurement error [day]")
# Just on 2 plots :
if tweakeddisplay:
if i == n-1:
plt.xlabel("True delay [day]", fontsize=18)
if j == 0 and i == int(math.floor(n/2.0)):
plt.ylabel("Delay measurement error [day]", fontsize=18, y=-0.10)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
else:
if i == n-1:
plt.xlabel("True delay [day]", fontsize=16)
if j == 0 and i == int(math.floor(n/2.0)):
plt.ylabel("Delay measurement error [day]", fontsize=16)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
plt.xlim(plotrange)
#plt.ylim(plotrange)
if errorrange != None:
if hasattr(errorrange, '__iter__'): # then its a tuple or list
plt.ylim((errorrange[0], errorrange[1]))
else:
plt.ylim((-errorrange, errorrange))
if n != 2: # otherwise we have only 1 delay and panel
plt.annotate(delaylabel, xy=(0.03, 0.88-txtstep), xycoords='axes fraction', fontsize=14, color="black")
# enforce blindness if wanted, by modifying the xticks labels (not touching the data)
if blindness:
labels = ax.xaxis.get_ticklabels()
locs = ax.xaxis.get_ticklocs()
meanloc = np.mean(locs)
blindlabels = []
for loc, label in zip(locs, labels):
blindlabels.append(str(loc-meanloc))
ax.xaxis.set_ticklabels(blindlabels)
# That's it for this panel, back to the total figure :
if dataout:
for rr in rrlist:
dc = delaycontainer(data = rr.tmpdata, name = rr.name, plotcolour = rr.plotcolour, objects=labels[:])
pycs.gen.util.writepickle(dc,outdir+ "%s_errorbars.pkl" % (rr.autoname))
rr.tmpdata = None
labelspacetop = 0.0
labelspaceright = 0.0
if n == 2:
labelspacetop = 0.04
labelspaceright = 0.04
for irr, rr in enumerate(rrlist):
if displayn:
labeltxt = "%s (%i) " % (getattr(rr, 'name', 'NoName'), rr.tsarray.shape[0])
else:
labeltxt = "%s" % (getattr(rr, 'name', 'NoName'))
if not tweakeddisplay:
plt.figtext(x = right - labelspaceright, y = top - labelspacetop - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour, fontsize=15)
else:
plt.figtext(x = 0.54, y = 0.8325 - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="left", color=rr.plotcolour, fontsize=17)
if title != None:
#plt.figtext(x = left + (right-left)/2.0, y = ytitle, s = title, horizontalalignment="center", color="black", fontsize=18)
plt.figtext(x = xtitle, y = ytitle, s = title, horizontalalignment="center", color="black", fontsize=titlesize)
if filename==None:
plt.show()
else:
plt.savefig(filename)
def covplot(rrlist, showpoints=False, showcontour=True, showdensity=False, fractionalresiduals=False, bins=50, smoothing=0.0, figsize=(12, 12), left=0.02, right=0.98, bottom=0.02, top=0.98, wspace=0.05, hspace=0.05, r=5.0, title=None, txtstep=0.04, filename=None):
"""
Covariance scatter of all measurement errors.
Give me a single runresults object (from a sim, with known true delays).
"""
import scipy.stats
import matplotlib.colors as colors
nimages = rrlist[0].nimages()
imginds = np.arange(nimages)
#nruns = len(rr[0])
labels = rrlist[0].labels
couplelist = [(i, j) for j in imginds for i in imginds if i > j]
ncouples = len(couplelist)
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)
axisNum = 0
for ii, i in enumerate(couplelist): # (0, 1), (0, 2) ...
for jj, j in enumerate(couplelist):
if (ii == 0) or (jj == ncouples-1) :
continue # No plot
axisNum += 1
if jj >= ii:
continue
#print i, j, axisNum
ax = plt.subplot(ncouples-1, ncouples-1, axisNum, aspect='equal')
ax.axhline(0, color="black")
ax.axvline(0, color="black")
for rr in rrlist:
#print idelaylabel, " vs ", jdelaylabel
itruedelays = rr.truetsarray[:,i[0]] - rr.truetsarray[:,i[1]]
imeasdelays = rr.tsarray[:,i[0]] - rr.tsarray[:,i[1]]
if fractionalresiduals:
iresis = (imeasdelays - itruedelays)/itruedelays
else:
iresis = imeasdelays - itruedelays
jtruedelays = rr.truetsarray[:,j[0]] - rr.truetsarray[:,j[1]]
jmeasdelays = rr.tsarray[:,j[0]] - rr.tsarray[:,j[1]]
if fractionalresiduals:
jresis = (jmeasdelays - jtruedelays)/jtruedelays
else:
jresis = jmeasdelays - jtruedelays
if showdensity or "diff" in rr.name:
cmap = colors.LinearSegmentedColormap.from_list('custom',['white', rr.plotcolour],gamma=1.0)
#cmap = colors.LinearSegmentedColormap.from_list('custom',[rr.plotcolour, rr.plotcolour],gamma=1.0)
#cmap._init()
#alphas = np.abs(np.linspace(0.0, 0.5, cmap.N))
#cmap._lut[:-3,-1] = alphas
ax.hexbin(iresis, jresis, gridsize=bins, extent=(-r, r, -r, r), mincnt=1, cmap=cmap, edgecolor="none")
if showpoints:
ax.scatter(iresis, jresis, s=2, facecolor=rr.plotcolour, lw = 0)
#ax.hexbin(iresis, jresis, gridsize=20, extent=(-r, r, -r, r))
if showcontour:
"""
H, xedges, yedges = np.histogram2d(iresis, jresis, range=[[-r,r], [-r,r]], bins=(bins, bins))
H = H.transpose()
if smoothing > 0.01:
H = scipy.ndimage.filters.gaussian_filter(H, smoothing, mode='constant', cval=0.0)
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
#levels = [np.mean(H), np.max(H)/2.0]
#levels = [2.0*np.mean(H), 6.0*np.mean(H)]
#levels = (1.0e4, 1.0e3, 1.0e2, 2.0e1)
levels = [scipy.stats.scoreatpercentile(H.flatten(), 95.45), scipy.stats.scoreatpercentile(H.flatten(), 68.27)]
#levels = [scipy.stats.scoreatpercentile(H.flatten(), 68.27)]
cset = ax.contour(H, levels=levels, origin="lower", colors=rr.plotcolour, extent=extent, linewidth=0.5)
"""
H, xedges, yedges = np.histogram2d(iresis, jresis, range=[[-r,r], [-r,r]], bins=(bins, bins))
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
data = np.vstack((iresis, jresis))
#print data.shape
kde = scipy.stats.kde.gaussian_kde(data)
grid = np.mgrid[-r:r:1j*bins, -r:r:1j*bins]
grid_coords = np.append(grid[0].reshape(-1,1),grid[1].reshape(-1,1),axis=1)
z = kde(grid_coords.T)
z = z.reshape(bins,bins)
#levels = [scipy.stats.scoreatpercentile(z.flatten(), 95.45)]
levels = [np.max(z)*0.45]
cset = ax.contour(grid[0], grid[1], z, levels=levels, origin="lower", colors=rr.plotcolour, extent=extent, linewidth=0.5)
idelaylabel="%s%s" % (labels[i[1]], labels[i[0]])
jdelaylabel="%s%s" % (labels[j[1]], labels[j[0]])
#ax.set_xlabel(idelaylabel)
#ax.set_ylabel(jdelaylabel)
if figsize[0] > 8:
ax.annotate(idelaylabel, xy=(0.9, 0.05), xycoords='axes fraction', ha="center") # x axis
ax.annotate(jdelaylabel, xy=(0.06, 0.85), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
else:
ax.annotate(idelaylabel, xy=(0.78, 0.08), xycoords='axes fraction', ha="center") # x axis
ax.annotate(jdelaylabel, xy=(0.08, 0.76), xycoords='axes fraction', ha="left", rotation=90.0) # y axis
ax.set_xlim(-r, r)
ax.set_ylim(-r, r)
majorLocator = MultipleLocator(1.0)
ax.xaxis.set_major_locator(majorLocator)
majorLocator = MultipleLocator(1.0)
ax.yaxis.set_major_locator(majorLocator)
ax.set_xticklabels([])
ax.set_yticklabels([])
#ax.annotate(delaytext, xy=(0.03, 0.78 - 3*txtstep*(irr+0.5)), xycoords='axes fraction', color = datarr.plotcolour)
if title != None:
plt.figtext(x = 0.5, y = 0.97, s = title, horizontalalignment="center", color="black", fontsize=18)
#for (irr, rr) in enumerate(rrlist):
# plt.figtext(x = left + 0.25*irr, y = 0.96, s = getattr(rr, 'name', 'NoName'), horizontalalignment="left", color=rr.plotcolour)
for irr, rr in enumerate(rrlist):
labeltxt = "%s" % (getattr(rr, 'name', 'NoName'))
plt.figtext(x = right, y = top - txtstep*irr, s = labeltxt, verticalalignment="top", horizontalalignment="right", color=rr.plotcolour)
if filename==None:
plt.show()
else:
plt.savefig(filename)
| gpl-3.0 |
jereze/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of a simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>`, which is also a robust method. This is illustrated in the
second example below, where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this, but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen, it is recommended to use it
only for small problems in terms of the number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
dementrock/cgt | examples/demo_variational_autoencoder.py | 18 | 10799 | import cgt
from cgt import core
from cgt import nn
import numpy as np
import cPickle as pickle
from scipy.stats import norm
import matplotlib.pyplot as plt
from example_utils import fetch_dataset
'''
MNIST manifold demo (with 2-dimensional latent z) using a variational autoencoder
'''
rng = np.random.RandomState(1234)
def kld_unit_mvn(mu, var):
# KL divergence from N(0, I)
return (mu.shape[1] + cgt.sum(cgt.log(var), axis=1) - cgt.sum(cgt.square(mu), axis=1) - cgt.sum(var, axis=1)) / 2.0
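# A minimal plain-numpy counterpart of kld_unit_mvn (illustrative only; _np_kld_unit_mvn
# is not part of the original demo). It makes the closed form explicit: the value is
# 0.5 * (k + sum(log var) - sum(mu**2) - sum(var)) per row, i.e. the negative of
# KL(N(mu, diag(var)) || N(0, I)).
def _np_kld_unit_mvn(mu, var):
    # mu, var: numpy arrays of shape (batch, k)
    return (mu.shape[1] + np.log(var).sum(axis=1)
            - np.square(mu).sum(axis=1) - var.sum(axis=1)) / 2.0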
def log_diag_mvn(mu, var):
# log probability of x under N(mu, diag(var))
def f(x):
# expects batches
k = mu.shape[1]
logp = (-k / 2.0) * np.log(2 * np.pi) - 0.5 * cgt.sum(cgt.log(var), axis=1) - cgt.sum(0.5 * (1.0 / var) * (x - mu) * (x - mu), axis=1)
return logp
return f
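# Likewise, a plain-numpy sketch of the density evaluated by log_diag_mvn, assuming
# mu, var and x are (batch, k) arrays (_np_log_diag_mvn is illustrative, not part of
# the original demo).
def _np_log_diag_mvn(mu, var, x):
    k = mu.shape[1]
    return (-k / 2.0) * np.log(2 * np.pi) - 0.5 * np.log(var).sum(axis=1) \
        - (0.5 * (x - mu) ** 2 / var).sum(axis=1)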
class HiddenLayer(object):
# adapted from http://deeplearning.net/tutorial/mlp.html
def __init__(self, input, n_in, n_out, W=None, b=None,
activation=cgt.tanh, prefix=""):
self.n_in = n_in
self.n_out = n_out
if W is None:
# XXX replace with nn init
W_values = np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=cgt.floatX
)
if activation == cgt.sigmoid:
W_values *= 4
W = cgt.shared(W_values, name=prefix+"_W")
if b is None:
b_values = np.zeros((n_out,), dtype=cgt.floatX)
b = cgt.shared(b_values, name=prefix+"_b")
self.W = W
self.b = b
# XXX broadcast api may change
lin_output = cgt.broadcast("+", cgt.dot(input, self.W),
cgt.dimshuffle(self.b, ["x", 0]), "xx,1x")
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
class _MLP(object):
# building block for MLP instantiations defined below
def __init__(self, x, n_in, n_hid, nlayers=1, prefix=""):
self.nlayers = nlayers
self.hidden_layers = list()
inp = x
for k in xrange(self.nlayers):
hlayer = HiddenLayer(
input=inp,
n_in=n_in,
n_out=n_hid,
activation=cgt.tanh,
prefix=prefix + ("_%d" % (k + 1))
)
n_in = n_hid
inp = hlayer.output
self.hidden_layers.append(hlayer)
self.params = [param for l in self.hidden_layers for param in l.params]
        self.input = x
# NOTE output layer computed by instantations
class GaussianMLP(_MLP):
def __init__(self, x, n_in, n_hid, n_out, nlayers=1, y=None, eps=None):
super(GaussianMLP, self).__init__(x, n_in, n_hid, nlayers=nlayers, prefix="GaussianMLP_hidden")
self.mu_layer = HiddenLayer(
input=self.hidden_layers[-1].output,
n_in=self.hidden_layers[-1].n_out,
n_out=n_out,
activation=None,
prefix="GaussianMLP_mu"
)
# log(sigma^2)
self.logvar_layer = HiddenLayer(
input=self.hidden_layers[-1].output,
n_in=self.hidden_layers[-1].n_out,
n_out=n_out,
activation=None,
prefix="GaussianMLP_logvar"
)
self.mu = self.mu_layer.output
self.var = cgt.exp(self.logvar_layer.output)
self.sigma = cgt.sqrt(self.var)
self.params = self.params + self.mu_layer.params +\
self.logvar_layer.params
# for use as encoder
if eps is not None:
assert(y is None)
self.out = self.mu + self.sigma * eps
# for use as decoder
        if y is not None:
assert(eps is None)
self.out = cgt.sigmoid(self.mu)
self.cost = -cgt.sum(log_diag_mvn(self.out, self.var)(y))
class BernoulliMLP(_MLP):
def __init__(self, x, n_in, n_hid, n_out, nlayers=1, y=None):
super(BernoulliMLP, self).__init__(x, n_in, n_hid, nlayers=nlayers, prefix="BernoulliMLP_hidden")
self.out_layer = HiddenLayer(
input=self.hidden_layers[-1].output,
n_in=self.hidden_layers[-1].n_out,
n_out=n_out,
activation=cgt.sigmoid,
prefix="BernoulliMLP_y_hat"
)
self.params = self.params + self.out_layer.params
if y is not None:
self.out = self.out_layer.output
self.cost = cgt.sum(nn.binary_crossentropy(self.out, y))
class VAE(object):
def __init__(self, xdim, args, dec="bernoulli"):
self.xdim = xdim
self.hdim = args.hdim
self.zdim = args.zdim
self.lmbda = args.lmbda # weight decay coefficient * 2
self.x = cgt.matrix("x", dtype=cgt.floatX)
self.eps = cgt.matrix("eps", dtype=cgt.floatX)
self.enc_mlp = GaussianMLP(self.x, self.xdim, self.hdim, self.zdim, nlayers=args.nlayers, eps=self.eps)
if dec == "bernoulli":
# log p(x | z) defined as -CE(x, y) = dec_mlp.cost(y)
self.dec_mlp = BernoulliMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
elif dec == "gaussian":
self.dec_mlp = GaussianMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
else:
raise RuntimeError("unrecognized decoder %" % dec)
self.cost = (-cgt.sum(kld_unit_mvn(self.enc_mlp.mu, self.enc_mlp.var)) + self.dec_mlp.cost) / args.batch_size
self.params = self.enc_mlp.params + self.dec_mlp.params
# L2 regularization
self.gparams = [cgt.grad(self.cost, [p])[0] + self.lmbda * p for p in self.params]
self.gaccums = [cgt.shared(np.zeros(p.op.get_value().shape, dtype=cgt.floatX)) for p in self.params]
# XXX replace w/ adagrad update from nn
ADAGRAD_EPS = 1e-10 # for stability
self.updates = [
(param, param - args.lr * gparam / cgt.sqrt(gaccum + cgt.square(gparam) + ADAGRAD_EPS))
for param, gparam, gaccum in zip(self.params, self.gparams, self.gaccums)
]
self.updates += [
(gaccum, gaccum + cgt.square(gparam))
for gaccum, gparam in zip(self.gaccums, self.gparams)
]
self.train = cgt.function(
[self.x, self.eps],
self.cost,
updates=self.updates
)
self.test = cgt.function(
[self.x, self.eps],
self.cost,
updates=None
)
# can be used for semi-supervised learning for example
self.encode = cgt.function(
[self.x, self.eps],
self.enc_mlp.out
)
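# A minimal numpy sketch of the adagrad rule assembled symbolically in VAE.__init__
# above, written for a single parameter (_adagrad_step and its arguments are
# illustrative names, not part of the original demo).
def _adagrad_step(param, grad, gaccum, lr, eps=1e-10):
    # scale the step by the accumulated squared gradients (plus eps for stability),
    # then grow the accumulator with the new squared gradient
    new_param = param - lr * grad / np.sqrt(gaccum + np.square(grad) + eps)
    new_gaccum = gaccum + np.square(grad)
    return new_param, new_gaccum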
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=100)
parser.add_argument("--nlayers", default=1, type=int, help="number of hidden layers in MLP before output layers")
parser.add_argument("--hdim", default=500, type=int, help="dimension of hidden layer")
parser.add_argument("--zdim", default=2, type=int, help="dimension of continuous latent variable")
parser.add_argument("--lmbda", default=0.001, type=float, help="weight decay coefficient")
parser.add_argument("--lr", default=0.01, type=float, help="learning rate")
parser.add_argument("--epochs", default=1000, type=int, help="number of passes over dataset")
parser.add_argument("--print_every", default=100, type=int, help="how often to print cost")
parser.add_argument("--outfile", default="vae_model.pk", help="output file to save model to")
args = parser.parse_args()
print(args)
if args.epochs > 100:
print("NOTE: training might take a while. You may want to first sanity check by setting --epochs to something like 20 (manifold will be fuzzy).")
# set up dataset
mnist = fetch_dataset("http://rll.berkeley.edu/cgt-data/mnist.npz")
X = (mnist["X"]/255.).astype(cgt.floatX)
y = mnist["y"]
np.random.seed(0)
sortinds = np.random.permutation(70000)
X = X[sortinds]
y = y[sortinds]
train_x = X[0:50000]
train_y = y[0:50000]
valid_x = X[50000:60000]
valid_y = y[50000:60000]
# run SGVB algorithm
model = VAE(train_x.shape[1], args, dec="bernoulli")
expcost = None
num_train_batches = train_x.shape[0] / args.batch_size
num_valid_batches = valid_x.shape[0] / args.batch_size
valid_freq = num_train_batches
for b in xrange(args.epochs * num_train_batches):
k = b % num_train_batches
x = train_x[k * args.batch_size:(k + 1) * args.batch_size, :]
eps = np.random.randn(x.shape[0], args.zdim).astype(cgt.floatX)
cost = model.train(x, eps)
if not expcost:
expcost = cost
else:
expcost = 0.01 * cost + 0.99 * expcost
if (b + 1) % args.print_every == 0:
print("iter %d, cost %f, expcost %f" % (b + 1, cost, expcost))
if (b + 1) % valid_freq == 0:
valid_cost = 0
for l in xrange(num_valid_batches):
x_val = valid_x[l * args.batch_size:(l + 1) * args.batch_size, :]
eps_val = np.zeros((x_val.shape[0], args.zdim), dtype=cgt.floatX)
valid_cost = valid_cost + model.test(x_val, eps_val)
valid_cost = valid_cost / num_valid_batches
print("valid cost: %f" % valid_cost)
# XXX fix pickling of cgt models
#print("saving final model")
#with open(args.outfile, "wb") as f:
#pickle.dump(model, f, protocol=pickle.HIGHEST_PROTOCOL)
# XXX use this to sample, should later be able to compile f(z) = y directly (See Issue #18)
newz = cgt.matrix("newz", dtype=cgt.floatX)
newy = cgt.core.clone(model.dec_mlp.out, {model.enc_mlp.out:newz})
decode = cgt.function(
[newz],
newy
)
S = (28, 28)
M = 20
manifold = np.zeros((S[0]*M, S[1]*M), dtype=cgt.floatX)
for z1 in xrange(M):
for z2 in xrange(M):
print z1, z2
z = np.zeros((1, 2))
# pass unit square through inverse Gaussian CDF
z[0, 0] = norm.ppf(z1 * 1.0/M + 1.0/(M * 2))
z[0, 1] = norm.ppf(z2 * 1.0/M + 1.0/(M * 2))
z = np.array(z, dtype=cgt.floatX)
x_hat = decode(z)
x_hat = x_hat.reshape(S)
manifold[z1 * S[0]:(z1 + 1) * S[0],
z2 * S[1]:(z2 + 1) * S[1]] = x_hat
plt.imshow(manifold, cmap="Greys_r")
plt.axis("off")
plt.show()
if __name__ == "__main__":
main()
| mit |
RachitKansal/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
    # regression test for missing sorting of sample weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    # check if the min and max values are used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
wilsonkichoi/zipline | zipline/errors.py | 2 | 17574 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.utils.memoize import lazyval
class ZiplineError(Exception):
msg = None
def __init__(self, **kwargs):
self.kwargs = kwargs
@lazyval
def message(self):
return str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
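# Usage note: subclasses only supply a ``msg`` template; the keyword arguments passed
# at construction fill it in lazily, e.g.
#
#     err = SymbolNotFound(symbol='AAPL')
#     str(err)   # -> "Symbol 'AAPL' was not found."
#
# so callers simply ``raise SomeError(field=value, ...)`` with the fields the template
# expects.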
class NoTradeDataAvailable(ZiplineError):
pass
class NoTradeDataAvailableTooEarly(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It started trading on {start_dt}."
class NoTradeDataAvailableTooLate(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It stopped trading on {end_dt}."
class BenchmarkAssetNotAvailableTooEarly(NoTradeDataAvailableTooEarly):
pass
class BenchmarkAssetNotAvailableTooLate(NoTradeDataAvailableTooLate):
pass
class InvalidBenchmarkAsset(ZiplineError):
msg = """
{sid} cannot be used as the benchmark because it has a stock \
dividend on {dt}. Choose another asset to use as the benchmark.
""".strip()
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the set_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage
"""
msg = """
You attempted to set slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class SetSlippagePostInit(ZiplineError):
    # Raised if a user's script calls the set_slippage magic
    # after the initialize method has returned.
msg = """
You attempted to set slippage outside of `initialize`. \
You may only call 'set_slippage' in your initialize method.
""".strip()
class SetCancelPolicyPostInit(ZiplineError):
    # Raised if a user's script calls set_cancel_policy
    # after the initialize method has returned.
msg = """
You attempted to set the cancel policy outside of `initialize`. \
You may only call 'set_cancel_policy' in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
    # Raised if a user's script registers a trading control after initialize
    # has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
    # Raised if a user's script registers an account control after initialize
    # has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the set_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to set commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class UnsupportedCancelPolicy(ZiplineError):
"""
Raised if a user script calls set_cancel_policy with an object that isn't
a CancelPolicy.
"""
msg = """
You attempted to set the cancel policy with an unsupported class. Please use
an instance of CancelPolicy.
""".strip()
class SetCommissionPostInit(ZiplineError):
"""
    Raised if a user's script calls the set_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call 'set_commission' in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class CannotOrderDelistedAsset(ZiplineError):
"""
Raised if an order is for a delisted asset.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class SetBenchmarkOutsideInitialize(ZiplineError):
"""
Raised if set_benchmark is called outside initialize()
"""
msg = "'set_benchmark' can only be called within initialize function."
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class HistoryInInitialize(ZiplineError):
"""
Raised when an algorithm calls history() in initialize.
"""
msg = "history() should only be called in handle_data()"
class OrderInBeforeTradingStart(ZiplineError):
"""
Raised when an algorithm calls an order method in before_trading_start.
"""
msg = "Cannot place orders inside before_trading_start."
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
    'as_of_date' argument to specify when the symbol lookup
should be valid.
Possible options:{options}
""".strip()
class SymbolNotFound(ZiplineError):
"""
    Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class SidsNotFound(ZiplineError):
"""
Raised when a retrieve_asset() or retrieve_all() call contains a
non-existent sid.
"""
@lazyval
def plural(self):
return len(self.sids) > 1
@lazyval
def sids(self):
return self.kwargs['sids']
@lazyval
def msg(self):
if self.plural:
return "No assets found for sids: {sids}."
return "No asset found for sid: {sids[0]}."
class EquitiesNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_equities` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No equities found for sids: {sids}."
return "No equity found for sid: {sids[0]}."
class FutureContractsNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_futures_contracts` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No future contracts found for sids: {sids}."
return "No future contract found for sid: {sids[0]}."
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder can not consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class MapAssetIdentifierIndexError(ZiplineError):
"""
Raised when AssetMetaData.map_identifier_index_to_sids() is called on an
index of invalid objects.
"""
msg = """
AssetFinder can not map an index with values of type {obj}. Asset indices of
DataFrames or Panels must be integer sids, string symbols, or Asset objects.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class NonWindowSafeInput(ZiplineError):
"""
Raised when a Pipeline API term that is not deemed window safe is specified
as an input to another windowed term.
This is an error because it's generally not safe to compose windowed
functions on split/dividend adjusted data.
"""
msg = (
"Can't compute windowed expression {parent} with "
"windowed input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class TermOutputsEmpty(ZiplineError):
"""
Raised if a user attempts to construct a term with an empty outputs list.
"""
msg = (
"{termname} requires at least one output when passed an outputs "
"argument."
)
class InvalidOutputName(ZiplineError):
"""
Raised if a term's output names conflict with any of its attributes.
"""
msg = (
"{output_name!r} cannot be used as an output name for {termname}. "
"Output names cannot start with an underscore or be contained in the "
"following list: {disallowed_names}."
)
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying window
length and that term does not have a class-level default window length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class InvalidTermParams(ZiplineError):
"""
Raised if a user attempts to construct a Term using ParameterizedTermMixin
without specifying a `params` list in the class body.
"""
msg = (
"Expected a list of strings as a class-level attribute for "
"{termname}.params, but got {value} instead."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = (
"{termname} requires a dtype, but no dtype was passed."
)
class NotDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that isn't a numpy
dtype object.
"""
msg = (
"{termname} expected a numpy dtype "
"object for a dtype, but got {dtype} instead."
)
class UnsupportedDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that's not
supported.
"""
msg = (
"Failed to construct {termname}.\n"
"Pipeline terms of dtype {dtype} are not yet supported."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and 100.0, and min must be "
"less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AttachPipelineAfterInitialize(ZiplineError):
"""
Raised when a user tries to call add_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize()."
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
msg = "{typename} instances with dtype {dtype} are not supported."
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
    # than can be usefully templated.
msg = '{msg}'
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class PositionTrackerMissingAssetFinder(ZiplineError):
"""
Raised by a PositionTracker if it is asked to update an Asset but does not
have an AssetFinder
"""
msg = (
"PositionTracker attempted to update its Asset information but does "
"not have an AssetFinder. This may be caused by a failure to properly "
"de-serialize a TradingAlgorithm."
)
class AssetDBVersionError(ZiplineError):
"""
Raised by an AssetDBWriter or AssetFinder if the version number in the
versions table does not match the ASSET_DB_VERSION in asset_writer.py.
"""
msg = (
"The existing Asset database has an incorrect version: {db_version}. "
"Expected version: {expected_version}. Try rebuilding your asset "
"database or updating your version of Zipline."
)
class AssetDBImpossibleDowngrade(ZiplineError):
msg = (
"The existing Asset database is version: {db_version} which is lower "
"than the desired downgrade version: {desired_version}."
)
class HistoryWindowStartsBeforeData(ZiplineError):
msg = (
"History window extends before {first_trading_day}. To use this "
"history window, start the backtest on or after {suggested_start_day}."
)
class NonExistentAssetInTimeFrame(ZiplineError):
msg = (
"The target asset '{asset}' does not exist for the entire timeframe "
"between {start_date} and {end_date}."
)
| apache-2.0 |
hydroffice/hyo_soundspeed | hyo2/soundspeed/profile/profile.py | 1 | 62861 | import os
import time
import math
import numpy as np
import logging
from hyo2.soundspeed import __version__ as soundspeed_version
from hyo2.soundspeed.profile.metadata import Metadata
from hyo2.soundspeed.profile.samples import Samples
from hyo2.soundspeed.profile.more import More
from hyo2.soundspeed.profile.dicts import Dicts
from hyo2.soundspeed.profile.oceanography import Oceanography as Oc
from hyo2.soundspeed.profile.ray_tracing.ray_tracing import RayTracing
from hyo2.soundspeed.profile.ray_tracing.ray_path import RayPath
from hyo2.soundspeed.profile.ray_tracing.tracedprofile import TracedProfile
logger = logging.getLogger(__name__)
class Profile:
""""A sound speed profile with 3 sections: metadata, data specific to the task, and additional data"""
def __init__(self):
self.meta = Metadata() # metadata
self.data = Samples() # raw data
self.proc = Samples() # processed data
self.sis = Samples() # sis data
self.more = More() # additional fields
self.woa09 = None
self.woa13 = None
self.rtofs = None
self.gomofs = None
# variable for listener since the data are populated in another thread
self.listener_completed = False
self.listener_displayed = False
def __repr__(self):
msg = "<Profile>\n"
msg += "%s" % self.meta
msg += "%s" % self.data
msg += "%s" % self.more
return msg
@classmethod
def calc_weights(cls, depths):
mids = (depths[1:] + depths[:-1]) / 2
mids = np.insert(mids, 0, depths[0])
mids = np.append(mids, depths[-1])
diff = np.diff(mids)
diff = diff / np.sum(diff)
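        # e.g. depths [0, 2, 4] give midpoints [0, 1, 3, 4] and normalized interval
        # widths [0.25, 0.5, 0.25]: each sample is weighted by the portion of the
        # profile it represents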
return diff
@classmethod
def weighted_median(cls, values, weights):
# sort values and weights based on values
sorted_indices = np.argsort(values)
values_sorted = values[sorted_indices]
weights_sorted = weights[sorted_indices]
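        # e.g. sorted values [1, 3, 10] with weights [0.2, 0.4, 0.4] accumulate to 0.2
        # and then 0.6, so 3 is the first value reaching 0.5 and is returned below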
acc_prob = 0
for idx, val in enumerate(values_sorted):
acc_prob += weights_sorted[idx]
if acc_prob >= 0.5:
return val
raise RuntimeError("unable to calculated median")
def weighted_harmonic_mean(self):
avg_depth = 10000.0 # just a very deep value
        half_swath_angle = 1.0 # a small angle since we just care about nadir
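        # the harmonic mean is the physically meaningful average for sound speed:
        # the vertical travel time sums dz / c, so the effective speed is the total
        # depth divided by the total travel time, i.e. a depth-weighted harmonic mean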
tp1 = TracedProfile(ssp=self, avg_depth=avg_depth,
half_swath=half_swath_angle)
if len(tp1.harmonic_means) == 0:
return 0
return tp1.harmonic_means[0]
@classmethod
def weighted_arithmetic_std(cls, values, weights):
avg = np.average(values, weights=weights)
var = np.average((values - avg) ** 2, weights=weights)
return math.sqrt(var)
def weighted_harmonic_std(self):
w = self.calc_weights(self.proc.speed[self.proc_valid])
avg = self.weighted_harmonic_mean()
var = np.average((self.proc.speed[self.proc_valid] - avg) ** 2, weights=w)
return math.sqrt(var)
def init_data(self, num_samples):
if num_samples == 0:
return
self.data.num_samples = num_samples
self.data.init_pressure()
self.data.init_depth()
self.data.init_speed()
self.data.init_temp()
self.data.init_conductivity()
self.data.init_sal()
self.data.init_source()
self.data.init_flag()
def init_proc(self, num_samples):
if num_samples == 0:
return
self.proc.num_samples = num_samples
self.proc.init_pressure()
self.proc.init_depth()
self.proc.init_speed()
self.proc.init_temp()
self.proc.init_conductivity()
self.proc.init_sal()
self.proc.init_source()
self.proc.init_flag()
def init_sis(self, num_samples=0):
self.sis.num_samples = num_samples
self.sis.init_pressure()
self.sis.init_depth()
self.sis.init_speed()
self.sis.init_temp()
self.sis.init_conductivity()
self.sis.init_sal()
self.sis.init_source()
self.sis.init_flag()
def init_more(self, more_fields):
self.more.init_struct_array(self.data.num_samples, more_fields)
def data_resize(self, count):
self.data.resize(count)
self.more.resize(count)
@property
def data_valid(self):
"""Return indices of valid data"""
return np.equal(self.data.flag, Dicts.flags['valid'])
@property
def proc_valid(self):
"""Return indices of valid proc samples"""
return np.equal(self.proc.flag, Dicts.flags['valid'])
@property
def nr_valid_proc_samples(self):
"""Return the number of valid proc samples"""
return len(self.proc.depth[self.proc_valid])
@property
def proc_dqa_valid(self):
"""Return indices of DQA valid proc samples"""
return np.logical_and(self.proc_valid, np.logical_or(np.equal(self.proc.source, Dicts.sources['raw']),
np.equal(self.proc.source, Dicts.sources['smoothing'])))
@property
def sis_valid(self):
"""Return indices of valid sis samples"""
return np.equal(self.sis.flag, Dicts.flags['valid'])
@property
def sis_thinned(self):
"""Return indices of thinned sis samples"""
return np.equal(self.sis.flag, Dicts.flags['thin'])
@property
def proc_invalid_direction(self):
"""Return indices of invalid data for direction"""
        return np.equal(self.proc.flag, Dicts.flags['direction']) # numpy 1.10.4 gives a warning
@property
def proc_depth_min(self):
return self.proc.depth[self.proc_valid].min()
@property
def proc_speed_min(self):
return self.proc.speed[self.proc_valid].min()
@property
def proc_temp_min(self):
return self.proc.temp[self.proc_valid].min()
@property
def proc_sal_min(self):
return self.proc.sal[self.proc_valid].min()
@property
def proc_depth_max(self):
return self.proc.depth[self.proc_valid].max()
@property
def proc_speed_max(self):
return self.proc.speed[self.proc_valid].max()
@property
def proc_temp_max(self):
return self.proc.temp[self.proc_valid].max()
@property
def proc_sal_max(self):
return self.proc.sal[self.proc_valid].max()
@property
def proc_depth_median(self):
w = self.calc_weights(self.proc.depth[self.proc_valid])
return self.weighted_median(self.proc.depth[self.proc_valid], w)
@property
def proc_speed_median(self):
w = self.calc_weights(self.proc.speed[self.proc_valid])
return self.weighted_median(self.proc.speed[self.proc_valid], w)
@property
def proc_temp_median(self):
w = self.calc_weights(self.proc.temp[self.proc_valid])
return self.weighted_median(self.proc.temp[self.proc_valid], w)
@property
def proc_sal_median(self):
w = self.calc_weights(self.proc.sal[self.proc_valid])
return self.weighted_median(self.proc.sal[self.proc_valid], w)
@property
def proc_depth_mean(self):
w = self.calc_weights(self.proc.depth[self.proc_valid])
return np.average(self.proc.depth[self.proc_valid], weights=w)
@property
def proc_speed_mean(self):
return self.weighted_harmonic_mean()
@property
def proc_temp_mean(self):
w = self.calc_weights(self.proc.temp[self.proc_valid])
return np.average(self.proc.temp[self.proc_valid], weights=w)
@property
def proc_sal_mean(self):
w = self.calc_weights(self.proc.sal[self.proc_valid])
return np.average(self.proc.sal[self.proc_valid], weights=w)
@property
def proc_depth_std(self):
w = self.calc_weights(self.proc.depth[self.proc_valid])
return self.weighted_arithmetic_std(self.proc.depth[self.proc_valid], w)
@property
def proc_speed_std(self):
return self.weighted_harmonic_std()
@property
def proc_temp_std(self):
w = self.calc_weights(self.proc.temp[self.proc_valid])
return self.weighted_arithmetic_std(self.proc.temp[self.proc_valid], w)
@property
def proc_sal_std(self):
w = self.calc_weights(self.proc.sal[self.proc_valid])
return self.weighted_arithmetic_std(self.proc.sal[self.proc_valid], w)
def _calc_water_salinity_threshold(self):
"""Determine salinity threshold from min/max values to help determine where instrument entered water"""
sal_min = self.proc.sal[self.proc_valid].min()
sal_max = self.proc.sal[self.proc_valid].max()
        sal_thresh = sal_min + 0.1 * (sal_max - sal_min) # 10% between extremes
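        # e.g. a cast whose salinity spans 0 to 35 gives 0 + 0.1 * 35 = 3.5, which
        # the clamp below caps at the 3.0 upper bound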
if sal_thresh < 0.03:
sal_thresh = 0.03
elif sal_thresh > 3:
sal_thresh = 3.0
return sal_thresh
def _calc_air_water_index(self):
"""Use the salinity threshold to determine where instrument first entered water."""
sal_thresh = self._calc_water_salinity_threshold()
try:
water_i = np.argwhere(self.proc.sal[self.proc_valid] >= sal_thresh)[0][0]
except IndexError: # never passed threshold
water_i = len(self.proc.sal[self.proc_valid]) - 1
logger.debug("water index: %d (using salinity threshold: %.3f)" % (water_i, sal_thresh))
return water_i
def remove_pre_water_entry(self):
"""Look for data that is likely out of the water by searching for very low salinity levels"""
if not self.proc.sal.any():
logger.debug("all salinity values are zero -> skipping pre-water entry removal")
return
if self.nr_valid_proc_samples == 0:
logger.debug("not valid processing samples")
return
# remove initial samples out of water using salinity
water_i = self._calc_air_water_index()
in_air_ii = np.arange(0, len(self.proc_valid)) < water_i
valid_and_in_air_ii = np.logical_and(self.proc_valid, in_air_ii)
self.proc.flag[valid_and_in_air_ii] = Dicts.flags['filtered']
# remove samples out of water within the profile using salinity
try:
sal_th = 0.8 * self.proc.sal[self.proc_valid][0]
in_air_ii = self.proc.sal <= sal_th
valid_and_in_air_ii = np.logical_and(self.proc_valid, in_air_ii)
self.proc.flag[valid_and_in_air_ii] = Dicts.flags['filtered']
except IndexError:
logger.warning("issue with removing samples out of the water using salinity")
# remove samples out of water within the profile using pressure
try:
press_th = -0.001
in_air_ii = self.proc.pressure <= press_th
valid_and_in_air_ii = np.logical_and(self.proc_valid, in_air_ii)
self.proc.flag[valid_and_in_air_ii] = Dicts.flags['filtered']
except IndexError:
logger.warning("issue with removing samples out of the water using salinity")
def statistical_filter(self):
speed = self.proc.speed[self.proc_valid]
depth = self.proc.depth[self.proc_valid]
sigma = speed * 0.0
speed_mean = speed * 0.0
nr_samples = len(speed)
sigma_min_th = 0.2 # Minimum standard deviation allowed.
logger.debug("applying statistical filter at %d valid samples" % nr_samples)
        # Calculate the local mean and std dev for each sample, using 2 neighbors on either side.
        # Endpoints are treated separately. Target: single-point fliers.
for i in range(2, nr_samples - 2):
speed_sum = 0
speed_sum_sq = 0
for k in range(-2, 3):
if k == 0: # skip itself
continue
speed_sum += speed[i + k]
speed_sum_sq += speed[i + k] * speed[i + k]
variance = ((4 * speed_sum_sq) - speed_sum * speed_sum) / (4 * 3) # unbiased variance
speed_mean[i] = speed_sum / 4
if variance < 0:
variance = 0
sigma[i] = np.sqrt(variance) # Local standard deviation
if sigma[i] < sigma_min_th:
sigma[i] = sigma_min_th
# Endpoints (use only three neighboring points). Relax tolerance.
c_end = 1.3 # Relaxed tolerance factor at endpoints.
ends_i = [0, 1, nr_samples - 2, nr_samples - 1]
index = [(1, 2, 3), (0, 2, 3), (nr_samples - 4, nr_samples - 3, nr_samples - 1),
(nr_samples - 4, nr_samples - 3, nr_samples - 2)]
for k in range(4):
speed_sum = 0
speed_sum_sq = 0
i = ends_i[k] # Point number
for j in range(3):
ind_kj = index[k][j]
speed_sum += speed[ind_kj]
speed_sum_sq += speed[ind_kj] * speed[ind_kj]
variance = ((3 * speed_sum_sq) - speed_sum * speed_sum) / (3 * 2) # unbiased variance
speed_mean[i] = speed_sum / 3
if variance < 0:
variance = 0
sigma[i] = np.sqrt(variance)
if sigma[i] < sigma_min_th:
sigma[i] = sigma_min_th
sigma[i] *= c_end # Relax tolerance for end pts
# identify the sample to filter
nr_std_dev = 2 # number of standard deviations to use for error band.
tolerance_factor = 1.3 # Tolerance factor.
depth_th = 33.0 # Depth at which to relax error band.
factor = tolerance_factor
stat_filtered = np.zeros(nr_samples, dtype=bool)
for i in range(nr_samples):
if depth[i] > depth_th:
factor = 1.0
th = factor * nr_std_dev * sigma[i]
if np.absolute(speed[i] - speed_mean[i]) > th:
logger.debug("statistical filtering for sample #%d (%.2f, %.2f, th: %.2f)"
% (i, speed[i], speed_mean[i], th))
stat_filtered[i] = True
# finally apply the statistical filtering
filtered_ii = np.zeros(len(self.proc_valid), dtype=bool)
filtered_ii[self.proc_valid] = stat_filtered
valid_and_filtered_ii = np.logical_and(self.proc_valid, filtered_ii)
self.proc.flag[valid_and_filtered_ii] = Dicts.flags['filtered']
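    # Hedged sketch (illustrative addition; not part of the original class): a
    # stripped-down version of the statistical filter above that skips the endpoint
    # handling and the depth-based relaxation, to isolate the core
    # "local mean +/- n sigma" test on 2 neighbors per side.
    @staticmethod
    def _demo_spike_mask(speed, nr_std_dev=2, sigma_min=0.2):
        """Illustrative only: flag single-point fliers in a 1D speed array."""
        speed = np.asarray(speed, dtype=float)
        mask = np.zeros(speed.size, dtype=bool)
        for i in range(2, speed.size - 2):
            neighbors = np.concatenate([speed[i - 2:i], speed[i + 1:i + 3]])
            sigma = max(neighbors.std(ddof=1), sigma_min)  # unbiased std, floored
            mask[i] = abs(speed[i] - neighbors.mean()) > nr_std_dev * sigma
        return mask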
def cosine_smooth(self):
"""Cosine-averaging to smooth the profile data"""
verbose = False # set to True for verbose intermediate steps
valid_and_smoothed = self.proc.source[self.proc_valid] == Dicts.sources['smoothing']
# noinspection PyUnresolvedReferences
if (valid_and_smoothed == True).all():
return
# logger.debug(valid_and_smoothed)
# retrieve the depths
zs = self.proc.depth[self.proc_valid]
# create a dictionary with data types, not including source and type
names = ["pressure", "speed", "temp", "conductivity", "sal"]
records = dict()
records["pressure"] = self.proc.pressure[self.proc_valid]
records["speed"] = self.proc.speed[self.proc_valid]
records["temp"] = self.proc.temp[self.proc_valid]
records["conductivity"] = self.proc.conductivity[self.proc_valid]
records["sal"] = self.proc.sal[self.proc_valid]
# create the window widths
ww_min = 1.7 # minimum window width for the cos( ) function
ww_mul = 0.0025 # used to grow the window width (that is, to spread the averaging) as the z gets larger
window_width = np.maximum(np.absolute(zs * ww_mul), ww_min)
if verbose:
logger.debug("cosine avg -> window width: %s" % (window_width,))
# retrieve the profile's z range
z_min = zs.min()
z_max = zs.max()
if verbose:
logger.debug("cosine avg -> z range: (%f, %f)" % (z_min, z_max))
# create a 2D storage array:
# - [rows -> types]: 2 additional rows (bin values on row #0, weights on row #-1)
# - [cols -> values]: extra columns (that will be removed at the end) on both sides
bin_size = 1.0 # size of bin, unit or measure is meter (depth)
bin_width = 4 # number of bins on both sides of the value
storage = np.zeros([len(names) + 2, int(2 * (bin_width + 1) + (z_max - z_min) / bin_size)])
if verbose:
logger.debug("cosine avg -> storage: rows %s, columns %s" % (storage.shape[0], storage.shape[1]))
# populate bin values (row #0)
for i in range(storage.shape[1]):
storage[0][i] = z_min + (- bin_width + i) * bin_size
if verbose:
logger.debug("cosine avg -> storage bin values: %s" % (storage[0],))
# populate weights
for i, z in enumerate(zs):
# calculate the index of the central bin value
center_idx = int((z - z_min) / bin_size + .5) + bin_width
# if debug:
# logger.debug("cosine avg -> z: %s, center index: %s" % (z, center_idx))
# calculate the differences from the current z values in the averaging windows
z_diff = z - storage[0][center_idx - bin_width:center_idx + bin_width + 1]
# if debug:
# logger.debug("cosine avg -> z: %s, z diff: %s" % (z, z_diff))
            # Ensure that the weight drops to ~0.1 at one window width from point i
bin_weights = 1.0 + np.cos(2.69 * z_diff / window_width[i])
bin_weights *= np.absolute(z_diff) < window_width[i] # set to 0 when outside the window width
# if debug:
# logger.debug("cosine avg -> z: %s, bin weights: %s" % (z, bin_weights))
for j, name in enumerate(names):
# summing up for all the types, row is j + 1 since the first row is for bin values
storage[1 + j][center_idx - bin_width:center_idx + bin_width + 1] += records[name][i] * bin_weights
storage[-1][center_idx - bin_width:center_idx + bin_width + 1] += bin_weights
if verbose:
logger.debug("cosine avg -> storage weights: %s" % (storage[-1],))
for j, name in enumerate(names):
logger.debug("cosine avg -> storage %s sums: %s" % (name, storage[1 + j],))
# remove the end stuff for the bin widths
storage = storage[:, bin_width: -(bin_width + 1)]
# remove bins that didn't have enough weighting.
storage = np.compress(storage[-1] > 0.1, storage, axis=1)
# normalize by using the stored weights
for i in range(len(names)):
storage[1 + i] /= storage[-1]
# calculate the z differences + add 1 value to the outcome (to have the same length as the storage)
delta_zs = np.hstack(([1.0], np.diff(storage[0])))
# remove duplicated z values
storage = np.compress(delta_zs >= .00001, storage, axis=1)
# logger.debug(self.proc.depth)
# insert created data into the self.proc arrays
last_depth = 0.0
for row in storage.T:
# index of where the storage record falls
d_th = float(row[0])
if d_th < last_depth:
continue
last_depth = d_th
try:
z_bools = np.logical_and(self.proc_valid, self.proc.depth > d_th)
i = np.argwhere(z_bools)[0]
except IndexError:
i = zs.size
self.proc.depth = np.insert(self.proc.depth, i, d_th)
self.proc.source = np.insert(self.proc.source, i, Dicts.sources['smoothing'])
self.proc.flag = np.insert(self.proc.flag, i, Dicts.flags['valid'])
for j, name in enumerate(names):
setattr(self.proc, name, np.insert(getattr(self.proc, name), i, row[j + 1]))
# logger.debug(self.proc.depth)
# since we inserted new samples
self.proc.num_samples = self.proc.depth.size
# mark previous 'valid' data as 'smoothed'
self.proc.flag[self.proc.source != Dicts.sources['smoothing']] = Dicts.flags['smoothed']
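    # Hedged sketch (illustrative addition; the method name is hypothetical): the
    # raised-cosine weighting used by cosine_smooth() in isolation. Weights fall
    # from 2 at zero offset to roughly 0.1 at one window width and are zeroed
    # outside the window.
    @staticmethod
    def _demo_cosine_weights(z_diff, window_width):
        """Illustrative only: bin weights for a given depth offset and window width."""
        z_diff = np.asarray(z_diff, dtype=float)
        weights = 1.0 + np.cos(2.69 * z_diff / window_width)
        weights *= np.absolute(z_diff) < window_width  # zero outside the window
        return weights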
def reduce_up_down(self, ssp_direction, use_pressure=False):
"""Reduce the raw data samples based on the passed direction"""
if self.data.num_samples == 0: # skipping if there are no data
return
# identify max depth
if use_pressure:
max_value = self.data.pressure[self.data_valid].max() # max pressure
logger.debug("reduce up/down > max pressure: %s" % max_value)
else:
max_value = self.data.depth[self.data_valid].max() # max depth
logger.debug("reduce up/down > max depth: %s" % max_value)
        # loop through the samples using the max depth as the turning point
max_reached = False
last_value = None
for i in range(self.data.num_samples):
if use_pressure:
value = self.data.pressure[i]
else:
value = self.data.depth[i]
if (ssp_direction == Dicts.ssp_directions['up'] and not max_reached) \
or (ssp_direction == Dicts.ssp_directions['down'] and max_reached):
self.data.flag[i] = Dicts.flags['direction'] # set invalid for direction
elif ssp_direction == Dicts.ssp_directions['down'] and not max_reached:
if (i != 0) and (last_value is not None):
if value <= last_value:
# print(last_value, value)
self.data.flag[i] = Dicts.flags['direction'] # set invalid for direction
else:
last_value = value
else:
last_value = value
elif ssp_direction == Dicts.ssp_directions['up'] and max_reached:
if (i != 0) and (last_value is not None):
if value >= last_value:
self.data.flag[i] = Dicts.flags['direction'] # set invalid for direction
else:
last_value = value
else:
last_value = value
if (value == max_value) and (max_reached is False):
max_reached = True
last_value = value
if ssp_direction == Dicts.ssp_directions['up']:
                    self.data.flag[i] = Dicts.flags['valid']  # switch the last flagged sample back to valid
if np.sum(self.data_valid) <= 1:
raise RuntimeError('Unable to locate the upcast values. Double check their presence in the input file.')
if ssp_direction == Dicts.ssp_directions['up']:
logger.debug("flipping data for up direction")
self.data.pressure[:] = np.flipud(self.data.pressure)
self.data.depth[:] = np.flipud(self.data.depth)
self.data.speed[:] = np.flipud(self.data.speed)
self.data.temp[:] = np.flipud(self.data.temp)
self.data.conductivity[:] = np.flipud(self.data.conductivity)
self.data.sal[:] = np.flipud(self.data.sal)
self.data.source[:] = np.flipud(self.data.source)
self.data.flag[:] = np.flipud(self.data.flag)
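    # Hedged sketch (illustrative addition; not the library's implementation): the
    # core idea of reduce_up_down() on a bare depth array -- locate the turning
    # point at maximum depth and keep only the requested side of the cast. The
    # flagging and monotonicity checks of the real method are omitted.
    @staticmethod
    def _demo_split_cast(depth, keep_down=True):
        """Illustrative only: split a down/up cast at its deepest sample."""
        depth = np.asarray(depth, dtype=float)
        turn = int(np.argmax(depth))  # index of the maximum depth
        return depth[:turn + 1] if keep_down else depth[turn:]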
def calc_salinity_from_conductivity(self):
if np.count_nonzero(self.data.pressure):
pressure = self.data.pressure
else:
if not self.meta.latitude:
latitude = 30.0
logger.warning("using default latitude: %s" % latitude)
else:
latitude = self.meta.latitude
pressure = Oc.d2p_backup(self.data.depth, latitude)
self.data.sal = np.zeros_like(self.data.conductivity)
# convert from S/m to mmho/cm
# self.data.sal[self.data_valid] = Oc.c2s(self.data.conductivity[self.data_valid] * 10.0,
# pressure[self.data_valid], self.data.temp[self.data_valid])
self.data.sal = Oc.c2s(self.data.conductivity * 10.0, pressure,
self.data.temp) # FIXME - what is the standard conductivity unit? S/m or mmho/cm (mS/cm)
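        # Note (added for clarity): 1 S/m equals 10 mS/cm, and mS/cm is the same unit
        # as mmho/cm, so the factor of 10 above converts S/m values to mmho/cm.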
self.modify_proc_info(Dicts.proc_import_infos['CALC_SAL'])
def calc_salinity(self):
"""Helper method to calculate salinity from depth, sound speed and temperature"""
# logger.debug("calculate salinity")
if not self.meta.latitude:
latitude = 30.0
logger.warning("using default latitude: %s" % latitude)
else:
latitude = self.meta.latitude
for count in range(self.data.num_samples):
self.data.sal[count] = Oc.sal(d=self.data.depth[count], speed=self.data.speed[count],
t=self.data.temp[count], lat=latitude)
self.modify_proc_info(Dicts.proc_import_infos['CALC_SAL'])
def calc_dyn_height(self):
"""Helper method to calculate the dynamic height"""
if not self.meta.latitude:
latitude = 30.0
logger.warning("using default latitude: %s" % latitude)
else:
latitude = self.meta.latitude
if not self.meta.longitude:
longitude = -70.0
logger.warning("using default longitude: %s" % longitude)
else:
longitude = self.meta.longitude
try:
# print(self.data_valid)
sa = Oc.sal2sa(sal=self.data.sal[self.data_valid],
p=self.data.pressure[self.data_valid],
lon=longitude, lat=latitude)
ct = Oc.t2ct(sa=sa,
t=self.data.temp[self.data_valid],
p=self.data.pressure[self.data_valid])
dh = Oc.geo_strf_dyn_height(sa=sa, ct=ct, p=self.data.pressure[self.data_valid], p_ref=0)
for val in dh:
if np.isnan(val):
raise RuntimeError("nan in geo_strf_dyn_height")
return dh
except Exception as e:
logger.warning("issue: %s" % e)
return None
def calc_data_depth(self):
"""Helper method to calculate depth from pressure (in dBar)"""
dyn_height = self.calc_dyn_height()
if not self.meta.latitude:
latitude = 30.0
logger.warning("using default latitude: %s" % latitude)
else:
latitude = self.meta.latitude
self.data.depth = np.zeros_like(self.data.pressure)
self.data.depth[self.data_valid] = Oc.p2d(p=self.data.pressure[self.data_valid], lat=latitude,
dyn_height=dyn_height, debug=True)
self.modify_proc_info(Dicts.proc_import_infos['CALC_DEP'])
def calc_dyn_height_with_depth(self):
"""Helper method to calculate the dynamic height"""
if not self.meta.latitude:
latitude = 30.0
logger.warning("using default latitude: %s" % latitude)
else:
latitude = self.meta.latitude
if not self.meta.longitude:
longitude = -70.0
logger.warning("using default longitude: %s" % longitude)
else:
longitude = self.meta.longitude
try:
# print(self.data_valid)
sa = Oc.sal2sa(sal=self.data.sal[self.data_valid],
p=self.data.depth[self.data_valid],
lon=longitude, lat=latitude)
ct = Oc.t2ct(sa=sa,
t=self.data.temp[self.data_valid],
p=self.data.depth[self.data_valid])
dh = Oc.geo_strf_dyn_height(sa=sa, ct=ct, p=self.data.depth[self.data_valid], p_ref=0)
for val in dh:
if np.isnan(val):
raise RuntimeError("nan in geo_strf_dyn_height_with_depth")
return dh
except Exception as e:
logger.warning("issue: %s" % e)
return None
def calc_data_pressure(self):
"""Helper method to calculate pressure from depth (in m)"""
dyn_height = self.calc_dyn_height_with_depth()
raise RuntimeError("Not implemented")
def calc_data_speed(self):
"""Helper method to calculate sound speed"""
# logger.debug("calculate sound speed")
if not self.meta.latitude:
latitude = 30.0
logger.warning("using default latitude: %s" % latitude)
else:
latitude = self.meta.latitude
for count in range(self.data.num_samples):
self.data.speed[count] = Oc.speed(self.data.depth[count],
self.data.temp[count],
self.data.sal[count],
latitude)
self.modify_proc_info(Dicts.proc_import_infos['CALC_SPD'])
def calc_proc_speed(self):
"""Helper method to calculate processed sound speed"""
# logger.debug("calculate sound speed")
if not self.meta.latitude:
latitude = 30.0
logger.warning("using default latitude: %s" % latitude)
else:
latitude = self.meta.latitude
for count in range(self.proc.num_samples):
self.proc.speed[count] = Oc.speed(self.proc.depth[count],
self.proc.temp[count],
self.proc.sal[count],
latitude)
self.modify_proc_info(Dicts.proc_user_infos['RECALC_SPD'])
def calc_attenuation(self, frequency, ph):
"""Helper method to calculation attenuation [unused]"""
depth = np.zeros(self.proc.num_samples)
attenuation = np.zeros(self.proc.num_samples)
for i in range(self.proc.num_samples):
depth[i] = self.proc.depth[i]
attenuation[i] = Oc.a(frequency, self.proc.temp[i], self.proc.sal[i],
self.proc.depth[i], ph)
return attenuation, depth
def calc_cumulative_attenuation(self, frequency, ph):
"""Helper method to calculation cumulative attenuation [unused]"""
attenuation, depth = self.calc_attenuation(frequency, ph)
cumulative_attenuation = np.zeros(len(attenuation))
total_loss = 0
for count in range(len(attenuation) - 1):
layer_loss = attenuation[count] * (depth[count + 1] - depth[count]) / 1000.0
total_loss += layer_loss
cumulative_attenuation[count] = total_loss / (depth[count + 1] / 1000.0)
cumulative_attenuation[-1] = cumulative_attenuation[-2]
return cumulative_attenuation, depth
def insert_proc_speed(self, depth, speed, src=Dicts.sources['user']):
# logger.debug("insert speed to proc data: d:%s, vs:%s" % (depth, speed))
        # we need to take care of both valid samples and user-invalidated samples (to avoid breaking if they get un-flagged)
valid = self.proc.flag == Dicts.flags['valid'] # valid samples
iv = np.indices(self.proc.flag.shape)[0][valid] # indices of valid samples
user_invalid = self.proc.flag == Dicts.flags['user'] # user-invalidate samples
possible = np.logical_or(valid, user_invalid) # possible samples
ip = np.indices(self.proc.flag.shape)[0][possible] # indices of possible samples
# find depth index both in the valid and in the possible samples
try:
# noinspection PyTypeChecker
v_i = np.argwhere(self.proc.depth[valid] > depth)[0][0] # the index in the valid array
i = iv[v_i] # the corresponding index of the masked index in the full array
        except IndexError:  # in case there are none
v_i = self.proc.depth[valid].size - 1
i = iv[v_i]
try:
# noinspection PyTypeChecker
p_i = np.argwhere(self.proc.depth[possible] > depth)[0][0] # the index in the possible array
j = ip[p_i]
        except IndexError:  # in case there are none
p_i = self.proc.depth[possible].size - 1
j = ip[p_i]
# check if we already have this depth in the masked array
d_exists = self.proc.depth[valid][v_i] == depth
# manipulate profile (linear interpolation)
if d_exists:
# print('already present')
self.proc.speed[i] = speed
self.proc.source[i] = src
self.proc.flag[i] = Dicts.flags['valid']
else:
# print('new depth')
if depth < self.proc.depth[valid][0]:
m_ids = [0, 1]
# print('before beginning: %s' % j)
elif depth > self.proc.depth[valid][-1]:
j += 1
m_ids = [-2, -1]
# print('after end')
else:
if self.proc.depth[valid][v_i] < depth:
m_ids = [v_i, v_i + 1]
else:
m_ids = [v_i - 1, v_i]
# print('in the middle')
di = np.array([self.proc.depth[valid][m_ids[0]], self.proc.depth[valid][m_ids[1]]])
a = np.array([[di[0], 1.], [di[1], 1.]])
# interpolate for pressure
pi = np.array([self.proc.pressure[valid][m_ids[0]], self.proc.pressure[valid][m_ids[1]]])
pm, pc = np.linalg.lstsq(a, pi, rcond=None)[0]
self.proc.pressure = np.insert(self.proc.pressure, j, pm * depth + pc)
# print(self.proc.pressure[0], self.proc.pressure.size)
# interpolate for temp
ti = np.array([self.proc.temp[valid][m_ids[0]], self.proc.temp[valid][m_ids[1]]])
tm, tc = np.linalg.lstsq(a, ti, rcond=None)[0]
self.proc.temp = np.insert(self.proc.temp, j, tm * depth + tc)
# print(self.proc.temp[0], self.proc.temp.size)
# interpolate for conductivity
ci = np.array([self.proc.conductivity[valid][m_ids[0]], self.proc.conductivity[valid][m_ids[1]]])
cm, cc = np.linalg.lstsq(a, ci, rcond=None)[0]
self.proc.conductivity = np.insert(self.proc.conductivity, j, cm * depth + cc)
# print(self.proc.conductivity[0], self.proc.conductivity.size)
# interpolate for sal
si = np.array([self.proc.sal[valid][m_ids[0]], self.proc.sal[valid][m_ids[1]]])
sm, sc = np.linalg.lstsq(a, si, rcond=None)[0]
self.proc.sal = np.insert(self.proc.sal, j, sm * depth + sc)
# print(self.proc.sal[0], self.proc.sal.size)
self.proc.depth = np.insert(self.proc.depth, j, depth)
self.proc.speed = np.insert(self.proc.speed, j, speed)
self.proc.source = np.insert(self.proc.source, j, src)
self.proc.flag = np.insert(self.proc.flag, j, Dicts.flags['valid'])
self.proc.num_samples += 1
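    # Hedged sketch (illustrative addition; the helper name is hypothetical): the
    # two-point linear interpolation pattern used above. Solving the 2x2 system
    # [[d0, 1], [d1, 1]] for (m, c) with least squares and evaluating m * depth + c
    # is equivalent to interpolating linearly between the two bracketing samples.
    @staticmethod
    def _demo_two_point_interp(depth, d0, d1, v0, v1):
        """Illustrative only: value at `depth` interpolated between (d0, v0) and (d1, v1)."""
        a = np.array([[d0, 1.0], [d1, 1.0]])
        m, c = np.linalg.lstsq(a, np.array([v0, v1]), rcond=None)[0]
        return m * depth + c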
def insert_sis_speed(self, depth, speed, src=Dicts.sources['user'], temp=None, cond=None, sal=None):
# logger.debug("insert speed to sis data: d:%s, vs:%s" % (depth, speed))
        # we need to take care of both valid samples and user-invalidated samples (to avoid breaking if they get un-flagged)
valid = self.sis_thinned # valid samples
iv = np.indices(self.sis.flag.shape)[0][valid] # indices of valid samples
user_invalid = self.sis.flag == Dicts.flags['user'] # user-invalidate samples
possible = np.logical_or(valid, user_invalid) # possible samples
ip = np.indices(self.sis.flag.shape)[0][possible] # indices of possible samples
# find depth index both in the valid and in the possible samples
try:
# noinspection PyTypeChecker
v_i = np.argwhere(self.sis.depth[valid] > depth)[0][0] # the index in the valid array
i = iv[v_i] # the corresponding index of the masked index in the full array
        except IndexError:  # in case there are none
v_i = self.sis.depth[valid].size - 1
i = iv[v_i]
try:
# noinspection PyTypeChecker
p_i = np.argwhere(self.sis.depth[possible] > depth)[0][0] # the index in the possible array
j = ip[p_i]
        except IndexError:  # in case there are none
p_i = self.sis.depth[possible].size - 1
j = ip[p_i]
# check if we already have this depth in the masked array
d_exists = self.sis.depth[valid][v_i] == depth
# manipulate profile (linear interpolation)
if d_exists:
# logger.debug('sample already present with depth: %s -> modifying' % depth)
self.sis.speed[i] = speed
self.sis.source[i] = src
self.sis.flag[i] = Dicts.flags['thin']
if temp is not None:
self.sis.temp[i] = temp
if cond is not None:
self.sis.conductivity[i] = cond
if sal is not None:
self.sis.sal[i] = sal
else:
# logger.debug("added new sample at depth: %s" % depth)
if depth < self.sis.depth[valid][0]:
m_ids = [0, 1]
# print('before beginning: %s' % j)
elif depth > self.sis.depth[valid][-1]:
j += 1
m_ids = [-2, -1]
# print('after end')
else:
if self.sis.depth[valid][v_i] < depth:
m_ids = [v_i, v_i + 1]
else:
m_ids = [v_i - 1, v_i]
# print('in the middle')
di = np.array([self.sis.depth[valid][m_ids[0]], self.sis.depth[valid][m_ids[1]]])
a = np.array([[di[0], 1.], [di[1], 1.]])
# interpolate for pressure
pi = np.array([self.sis.pressure[valid][m_ids[0]], self.sis.pressure[valid][m_ids[1]]])
pm, pc = np.linalg.lstsq(a, pi, rcond=None)[0]
self.sis.pressure = np.insert(self.sis.pressure, j, pm * depth + pc)
# print(self.sis.pressure[0], self.sis.pressure.size)
# interpolate for temp
if temp is None:
ti = np.array([self.sis.temp[valid][m_ids[0]], self.sis.temp[valid][m_ids[1]]])
tm, tc = np.linalg.lstsq(a, ti, rcond=None)[0]
self.sis.temp = np.insert(self.sis.temp, j, tm * depth + tc)
else:
self.sis.temp = np.insert(self.sis.temp, j, temp)
# logger.debug("added temperature: %s" % self.sis.temp[j])
# interpolate for conductivity
if cond is None:
ci = np.array([self.sis.conductivity[valid][m_ids[0]], self.sis.conductivity[valid][m_ids[1]]])
cm, cc = np.linalg.lstsq(a, ci, rcond=None)[0]
self.sis.conductivity = np.insert(self.sis.conductivity, j, cm * depth + cc)
else:
self.sis.conductivity = np.insert(self.sis.conductivity, j, cond)
# logger.debug("added conductivity: %s" % self.sis.conductivity[j])
# interpolate for sal
if sal is None:
si = np.array([self.sis.sal[valid][m_ids[0]], self.sis.sal[valid][m_ids[1]]])
sm, sc = np.linalg.lstsq(a, si, rcond=None)[0]
self.sis.sal = np.insert(self.sis.sal, j, sm * depth + sc)
else:
self.sis.sal = np.insert(self.sis.sal, j, sal)
# logger.debug("added salinity: %s" % self.sis.sal[j])
self.sis.depth = np.insert(self.sis.depth, j, depth)
self.sis.speed = np.insert(self.sis.speed, j, speed)
self.sis.source = np.insert(self.sis.source, j, src)
# we flag it as thin since the user most likely wants to have this value in the export
self.sis.flag = np.insert(self.sis.flag, j, Dicts.flags['thin'])
self.sis.num_samples += 1
def insert_proc_temp_sal(self, depth, temp, sal):
logger.debug("insert temp, sal to proc data: d:%s, t:%s, s:%s" % (depth, temp, sal))
speed = Oc.speed(d=depth, t=temp, s=sal, lat=self.meta.latitude)
        # we need to take care of both valid samples and user-invalidated samples (to avoid breaking if they get un-flagged)
valid = self.proc.flag == Dicts.flags['valid'] # valid samples
iv = np.indices(self.proc.flag.shape)[0][valid] # indices of valid samples
user_invalid = self.proc.flag == Dicts.flags['user'] # user-invalidate samples
possible = np.logical_or(valid, user_invalid) # possible samples
ip = np.indices(self.proc.flag.shape)[0][possible] # indices of possible samples
# find depth index both in the valid and in the possible samples
try:
# noinspection PyTypeChecker
v_i = np.argwhere(self.proc.depth[valid] > depth)[0][0] # the index in the valid array
i = iv[v_i] # the corresponding index of the masked index in the full array
        except IndexError:  # in case there are none
v_i = self.proc.depth[valid].size - 1
i = iv[v_i]
try:
# noinspection PyTypeChecker
p_i = np.argwhere(self.proc.depth[possible] > depth)[0][0] # the index in the possible array
j = ip[p_i]
        except IndexError:  # in case there are none
p_i = self.proc.depth[possible].size - 1
j = ip[p_i]
# check if we already have this depth in the masked array
d_exists = self.proc.depth[valid][v_i] == depth
# manipulate profile (linear interpolation)
if d_exists:
# print('already present')
self.proc.temp[i] = temp
self.proc.sal[i] = sal
self.proc.speed[i] = speed
self.proc.source[i] = Dicts.sources['user']
self.proc.flag[i] = Dicts.flags['valid']
else:
# print('new depth')
if depth < self.proc.depth[valid][0]:
m_ids = [0, 1]
# print('before beginning: %s' % j)
elif depth > self.proc.depth[valid][-1]:
j += 1
m_ids = [-2, -1]
# print('after end')
else:
if self.proc.depth[valid][v_i] < depth:
m_ids = [v_i, v_i + 1]
else:
m_ids = [v_i - 1, v_i]
# print('in the middle')
di = np.array([self.proc.depth[valid][m_ids[0]], self.proc.depth[valid][m_ids[1]]])
a = np.array([[di[0], 1.], [di[1], 1.]])
# interpolate for pressure
pi = np.array([self.proc.pressure[valid][m_ids[0]], self.proc.pressure[valid][m_ids[1]]])
pm, pc = np.linalg.lstsq(a, pi, rcond=None)[0]
self.proc.pressure = np.insert(self.proc.pressure, j, pm * depth + pc)
# print(self.proc.pressure[0], self.proc.pressure.size)
# interpolate for conductivity
ci = np.array([self.proc.conductivity[valid][m_ids[0]], self.proc.conductivity[valid][m_ids[1]]])
cm, cc = np.linalg.lstsq(a, ci, rcond=None)[0]
self.proc.conductivity = np.insert(self.proc.conductivity, j, cm * depth + cc)
# print(self.proc.conductivity[0], self.proc.conductivity.size)
self.proc.depth = np.insert(self.proc.depth, j, depth)
self.proc.speed = np.insert(self.proc.speed, j, speed)
self.proc.temp = np.insert(self.proc.temp, j, temp)
self.proc.sal = np.insert(self.proc.sal, j, sal)
self.proc.source = np.insert(self.proc.source, j, Dicts.sources['user'])
self.proc.flag = np.insert(self.proc.flag, j, Dicts.flags['valid'])
self.proc.num_samples += 1
def extend_profile(self, extender, ext_type):
""" Use the extender samples to extend the profile """
logger.debug("extension source type: %s" % Dicts.first_match(Dicts.sources, ext_type))
try:
extender.cur.proc.source[:] = ext_type
except AttributeError:
return False
# find the max valid depth in the current profile
if self.proc.num_samples > 0:
vi = self.proc_valid
ivs = np.indices(self.proc.flag.shape)[0][vi] # indices of valid samples
max_depth = self.proc.depth[vi].max() # this is the max of the valid samples
# noinspection PyTypeChecker
vi_idx = np.argwhere(self.proc.depth[vi] >= max_depth)[0][0] # index of the max depth
max_idx = ivs[vi_idx] # index of the max depth in the original array
else:
max_depth = 0
max_idx = 0
# logger.debug("orig.max depth: %s[%s]" % (max_depth, max_idx))
# find the depth values in the extender that are deeper than the current (valid) max depth
ext_vi = extender.cur.proc_valid
try:
# noinspection PyTypeChecker
ind2 = np.argwhere(extender.cur.proc.depth[ext_vi][:] > max_depth)[0][0]
if ind2 <= 0:
logger.info("nothing to extend with")
return True
# logger.debug("ext.max depth: [%s]" % ind2)
except IndexError as e:
logger.warning("too short to extend with: %s" % e)
return True
# stack the extending samples after the last valid (max depth) index
self.proc.pressure = np.hstack([self.proc.depth[:max_idx + 1],
np.zeros_like(extender.cur.proc.depth[ext_vi][ind2:])])
self.proc.depth = np.hstack([self.proc.depth[:max_idx + 1],
extender.cur.proc.depth[ext_vi][ind2:]])
self.proc.speed = np.hstack([self.proc.speed[:max_idx + 1],
extender.cur.proc.speed[ext_vi][ind2:]])
self.proc.temp = np.hstack([self.proc.temp[:max_idx + 1],
extender.cur.proc.temp[ext_vi][ind2:]])
self.proc.conductivity = np.hstack([self.proc.sal[:max_idx + 1],
np.zeros_like(extender.cur.proc.sal[ext_vi][ind2:])])
self.proc.sal = np.hstack([self.proc.sal[:max_idx + 1],
extender.cur.proc.sal[ext_vi][ind2:]])
self.proc.source = np.hstack([self.proc.source[:max_idx + 1],
extender.cur.proc.source[ext_vi][ind2:]])
self.proc.flag = np.hstack([self.proc.flag[:max_idx + 1],
extender.cur.proc.flag[ext_vi][ind2:]])
self.proc.num_samples = self.proc.depth.size
# update processing info
if ext_type == Dicts.sources['ref_ext']:
self.modify_proc_info(Dicts.proc_user_infos['EXT_REF'])
elif ext_type == Dicts.sources['woa09_ext']:
self.modify_proc_info(Dicts.proc_user_infos['EXT_WOA09'])
elif ext_type == Dicts.sources['woa13_ext']:
self.modify_proc_info(Dicts.proc_user_infos['EXT_WOA13'])
elif ext_type == Dicts.sources['rtofs_ext']:
self.modify_proc_info(Dicts.proc_user_infos['EXT_RTOFS'])
elif ext_type == Dicts.sources['gomofs_ext']:
self.modify_proc_info(Dicts.proc_user_infos['EXT_GoMOFS'])
else:
logger.warning("unknown atlases: %s" % ext_type)
return False
return True
def modify_proc_info(self, info):
if info not in Dicts.proc_user_infos.values():
if info not in Dicts.proc_import_infos.values():
raise RuntimeError("invalid processing info: %s" % info)
# if empty, add the info
if not self.meta.proc_info:
self.meta.proc_info = info
return
# check if it is already present
tokens = self.meta.proc_info.split(';')
if info not in tokens:
self.meta.proc_info += ';%s' % info
def remove_user_proc_info(self):
# if empty, nothing to do
if not self.meta.proc_info:
return
# check if it is already present
tokens = self.meta.proc_info.split(';')
self.meta.proc_info = None
for i, token in enumerate(tokens):
if token in Dicts.proc_import_infos.values():
if self.meta.proc_info is None:
self.meta.proc_info = '%s' % token
else:
self.meta.proc_info += ';%s' % token
def clone_data_to_proc(self):
"""Clone the raw data samples into proc samples
The operation eliminates the direction-flagged samples
"""
# logger.info("cloning raw data to proc samples")
if self.data.num_samples == 0:
return
        vi = self.data_valid  # valid samples (direction-flagged ones are excluded)
self.init_proc(np.sum(vi))
self.proc.pressure[:] = self.data.pressure[vi]
self.proc.depth[:] = self.data.depth[vi]
self.proc.speed[:] = self.data.speed[vi]
self.proc.temp[:] = self.data.temp[vi]
self.proc.conductivity[:] = self.data.conductivity[vi]
self.proc.sal[:] = self.data.sal[vi]
self.proc.source[:] = self.data.source[vi]
self.proc.flag[:] = self.data.flag[vi]
self.update_proc_time()
def clone_proc_to_sis(self):
"""Clone the processed data samples into sis samples"""
# logger.info("cloning proc data to sis samples")
if self.proc.num_samples == 0:
return
self.init_sis(self.proc.depth.size)
self.sis.pressure[:] = self.proc.pressure
self.sis.depth[:] = self.proc.depth
self.sis.speed[:] = self.proc.speed
self.sis.temp[:] = self.proc.temp
self.sis.conductivity[:] = self.proc.conductivity
self.sis.sal[:] = self.proc.sal
self.sis.source[:] = self.proc.source
self.sis.flag[:] = self.proc.flag
def update_proc_time(self):
self.meta.update_proc_time()
def replace_proc_sal(self, source):
try:
self.proc.sal = np.interp(self.proc.depth[:], source.cur.proc.depth[:], source.cur.proc.sal[:])
except Exception as e:
logger.warning("in replace salinity, %s" % e)
return False
return True
def replace_proc_temp_sal(self, source):
try:
self.proc.temp = np.interp(self.proc.depth[:], source.cur.proc.depth[:], source.cur.proc.temp[:])
self.proc.sal = np.interp(self.proc.depth[:], source.cur.proc.depth[:], source.cur.proc.sal[:])
except Exception as e:
logger.warning("in replace temp/sal, %s" % e)
return False
return True
# - thinning
def thin(self, tolerance):
"""Thin the sis data"""
# logger.info("thinning the sis samples")
# if the profile is too short, we just pass it back
if self.sis.depth[self.sis_valid].size < 100:
self.sis.flag[self.sis_valid] = Dicts.flags['thin']
logger.debug("skipping thinning for short profile (%d samples)" % self.sis.depth[self.sis_valid].size)
return True
# - 1000 points for: EM2040, EM710, EM302 and EM122;
# - 570 points for: EM3000, EM3002, EM1002, EM300, EM120
flagged = self.sis.flag[self.sis_valid][:]
idx_start = 0
idx_end = self.sis.depth[self.sis_valid].size - 1
# logger.debug('first: %s, last: %s[%s]'
# % (self.sis.depth[self.sis_valid][idx_start],
# self.sis.depth[self.sis_valid][idx_end],
# self.sis.flag[self.sis_valid][idx_end]))
self.douglas_peucker_1d(idx_start, idx_end, tolerance=tolerance, data=flagged)
self.sis.flag[self.sis_valid] = flagged[:]
# logger.info("thinned: %s" % self.sis.flag[self.sis_thinned].size)
return True
def douglas_peucker_1d(self, start, end, tolerance, data):
""" Recursive implementation """
# logger.debug("dp: %s, %s" % (start, end))
# We always keep end points
data[start] = Dicts.flags['thin']
data[end] = Dicts.flags['thin']
slope = (self.sis.speed[self.sis_valid][end] - self.sis.speed[self.sis_valid][start]) / \
(self.sis.depth[self.sis_valid][end] - self.sis.depth[self.sis_valid][start])
max_dist = 0
max_ind = 0
for ind in range(start + 1, end):
dist = abs(self.sis.speed[self.sis_valid][start] +
slope * (self.sis.depth[self.sis_valid][ind] - self.sis.depth[self.sis_valid][start]) -
self.sis.speed[self.sis_valid][ind])
if dist > max_dist:
max_dist = dist
max_ind = ind
if max_dist <= tolerance:
return
else:
data[max_ind] = Dicts.flags['thin']
# print(max_ind, max_dist, data[max_ind])
self.douglas_peucker_1d(start, max_ind, tolerance, data=data)
self.douglas_peucker_1d(max_ind, end, tolerance, data=data)
return
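    # Hedged sketch (illustrative addition; not part of the original class): a
    # standalone Douglas-Peucker pass over plain arrays, mirroring the recursion
    # above. It returns a boolean keep-mask instead of writing flags and, like the
    # method above, measures the vertical distance of each point from the chord.
    @staticmethod
    def _demo_douglas_peucker(x, y, tolerance):
        """Illustrative only: 1D Douglas-Peucker keep-mask for the curve y(x)."""
        x = np.asarray(x, dtype=float)
        y = np.asarray(y, dtype=float)
        keep = np.zeros(x.size, dtype=bool)
        def _recurse(start, end):
            keep[start] = keep[end] = True  # endpoints are always kept
            if end - start < 2:
                return
            slope = (y[end] - y[start]) / (x[end] - x[start])
            dist = np.abs(y[start] + slope * (x[start + 1:end] - x[start]) - y[start + 1:end])
            max_off = int(np.argmax(dist))
            if dist[max_off] <= tolerance:
                return
            mid = start + 1 + max_off
            _recurse(start, mid)
            _recurse(mid, end)
        _recurse(0, x.size - 1)
        return keep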
# - debugging
def data_debug_plot(self, more=False):
"""Create a debug plot with the data, optionally with the extra data if available"""
if self.data.depth is None:
return
else:
self._plot(samples=self.data, more=more, kind='data')
def proc_debug_plot(self, more=False):
"""Create a debug plot with the processed data, optionally with the extra data if available"""
if self.proc.depth is None:
return
else:
self._plot(samples=self.proc, more=more, kind='proc')
def sis_debug_plot(self, more=False):
"""Create a debug plot with the sis-targeted data, optionally with the extra data if available"""
if self.sis.depth is None:
return
else:
self._plot(samples=self.sis, more=more, kind='sis')
def _plot(self, samples, more, kind):
from matplotlib import pyplot as plt
plt.figure("[%s] %s" % (self.meta.original_path, kind), dpi=120)
if samples.speed is not None:
plt.subplot(231) # speed
plt.plot(samples.speed, samples.depth)
plt.gca().invert_yaxis()
plt.grid(True)
plt.title('speed')
if samples.temp is not None:
plt.subplot(232) # temp
plt.plot(samples.temp, samples.depth)
plt.gca().invert_yaxis()
plt.grid(True)
plt.title('temp')
if samples.sal is not None:
plt.subplot(233) # sal
plt.plot(samples.sal, samples.depth)
plt.gca().invert_yaxis()
plt.grid(True)
plt.title('sal')
if samples.flag is not None:
plt.subplot(234) # source
plt.plot(samples.source, samples.depth)
plt.gca().invert_yaxis()
plt.grid(True)
plt.title('source')
if samples.flag is not None:
plt.subplot(235) # flag
plt.plot(samples.flag, samples.depth)
plt.gca().invert_yaxis()
plt.grid(True)
plt.title('flag')
plt.subplot(236) # meta
fs = 8 # font size
plt.title('meta[%s]' % kind)
plt.axis('off')
plt.text(0.1, 0.25, self.meta.debug_info(), fontsize=fs)
plt.show(block=False)
if more:
self.more.debug_plot()
def interpolate_proc_speed_at_depth(self, depth, points=3):
""" Return speed difference at the passed depth"""
# identify start/end of profile data to be used for interpolation
idx = np.searchsorted(self.proc.depth[self.proc_dqa_valid], depth)
start = (idx - points) if (idx - points >= 0) else 0
end = idx + points
# calculate coefficients and interpolated speed
coefficients = np.polyfit(self.proc.depth[self.proc_dqa_valid][start:end],
self.proc.speed[self.proc_dqa_valid][start:end], 2)
cast_speed = np.poly1d(coefficients)(depth)
return cast_speed
def compute_ray_paths(self, draft, thetas_deg, travel_times=None, res=.005, b_project=False):
"""Returns a RayPath object for each launch angle."""
if not draft or draft == 'Unknown':
draft = 0.0
else:
draft = float(draft)
depths = self.proc.depth[self.proc_dqa_valid] - draft
speeds = self.proc.speed[self.proc_dqa_valid]
ray_paths = []
for launch in thetas_deg:
params = RayTracing.get_svp_layer_parameters(np.deg2rad(launch), depths, speeds)
if travel_times is None:
tt = np.arange(res, params[-2][-1], res) # make travel_times to reach end of profile
else:
tt = np.array(travel_times)
rays = RayTracing.ray_trace(tt, depths, speeds, params, b_project=b_project)
rays[:, 0] += draft
ray_paths.append(RayPath(np.vstack((tt, rays.transpose())).transpose()))
return ray_paths
def compare_profile(self, profile, angle):
dep_max = min(self.proc.depth[self.proc_dqa_valid].max(), profile.proc.depth[profile.proc_dqa_valid].max())
if dep_max <= 400:
tt_inc = 0.002 # Travel time increment in seconds.
elif dep_max <= 800:
tt_inc = 0.005
else:
tt_inc = 0.01
draft1 = 0.0 # TODO
draft2 = 0.0 # TODO
draft = max(draft1, draft2)
# Generate the travel time table for the two profiles.
ray1 = self.compute_ray_paths(draft, [angle], res=tt_inc)[0]
ray2 = profile.compute_ray_paths(draft, [angle], res=tt_inc)[0]
nr_points = min(len(ray1.data), len(ray2.data))
if nr_points == 0:
raise RuntimeError("One of the two profiles is too shallow!")
depth1 = ray1.data[:nr_points, 1]
depth2 = ray2.data[:nr_points, 1]
delta_depth = depth2 - depth1
larger_depths = np.maximum(depth1, depth2)
pct_diff = np.absolute(delta_depth / larger_depths) * 100.0
# noinspection PyUnresolvedReferences
max_diff_index = pct_diff.argmax()
# noinspection PyUnresolvedReferences
max_diff = pct_diff[max_diff_index]
max_diff_depth = larger_depths[max_diff_index]
# create output message
p1 = "<pre style='margin:1px;'>"
p2 = "</pre>"
self_path = self.meta.original_path
self_path = self_path if os.path.exists(self_path) else os.path.basename(self_path)
profile_path = profile.meta.original_path
profile_path = profile_path if os.path.exists(profile_path) else os.path.basename(profile_path)
msg = "%s<b>SUMMARY OF RESULTS - COMPARE 2 CASTS</b>%s<br>" % (p1, p2)
msg += "%s<b>Sound Speed library</b>: %s%s" % (p1, soundspeed_version, p2)
msg += "%s<b>Reference profile</b>: %s%s" % (p1, self_path, p2)
msg += "%s<b>Comparison profile</b>: %s%s" % (p1, profile_path, p2)
msg += "%s<b>Reference instrument</b>: sensor-%s, probe-%s%s%s" \
% (p1, self.meta.sensor, self.meta.probe, ", sn-%s" % self.meta.sn if self.meta.sn else "", p2)
msg += "%s<b>Comparison instrument</b>: sensor-%s, probe-%s%s%s<br>" \
% (p1, profile.meta.sensor, profile.meta.probe, ", sn-%s" % profile.meta.sn if profile.meta.sn else "",
p2)
msg += "%s<b>Draft</b>: %.2f m%s" % (p1, draft, p2)
msg += "%s<b>Maximum Common Depth</b>: %.2f m%s" % (p1, dep_max, p2)
msg += "%s<b>Maximum Depth Percentage Difference</b>: %.2f%%%s" % (p1, max_diff, p2)
msg += "%s<b>Maximum Percentage Difference at</b>: %.2f m%s<br>" % (p1, max_diff_depth, p2)
msg += "%s<b>Max percentage diff. line and last line of travel time table</b>:%s" % (p1, p2)
msg += "%sTravel time, Avg Depth, Depth Diff, Pct Depth Diff, Avg Crosstrack, Crosstrack Diff, " \
"Pct Crosstrack Diff%s" % (p1, p2)
for ni in (max_diff_index, nr_points - 1):
msg += "%s%9.2f s,%8.2f m,%9.2f m,%14.2f%%,%13.2f m,%14.2f m,%19.2f%%%s" \
% (
p1,
ray1.data[ni, 0], # travel time
np.average([ray1.data[ni, 1], ray2.data[ni, 1]]), # avg. depth
np.absolute(delta_depth[ni]), # depth diff.
pct_diff[ni], # pct. depth diff.
np.average([ray1.data[ni, 2], ray2.data[ni, 2]]), # avg. cross-track
np.absolute(ray1.data[ni, 2] - ray2.data[ni, 2]), # cross-track diff.
100.0 * np.absolute(ray1.data[ni, 2] - ray2.data[ni, 2]) /
max(ray1.data[ni, 2], ray2.data[ni, 2]), # pct. cross-track diff.
p2
)
msg += '<br>%s%s%s<br>' % (p1, time.ctime(), p2)
if max_diff > 0.25:
msg += "%s<b>RESULTS INDICATE PROBLEM.</b>%s" % (p1, p2)
msg += "%sThe absolute value of percent depth difference exceeds the recommended amount (.25).%s" % (p1, p2)
msg += "%sIf test was conducted to compare 2 casts for possible grouping into one representative%s" \
% (p1, p2)
msg += "%scast, then the 2 casts should NOT be grouped.%s" % (p1, p2)
msg += "%sIf test was run as part of a Data Quality Assurance for 2 simultaneous casts, then one%s" \
% (p1, p2)
msg += "%sor both of the instruments used is functioning improperly. Investigate further by%s" % (p1, p2)
msg += "%sperforming simultaneous casts of each of the instruments with a third instrument.%s" % (p1, p2)
msg += "%sThen rerun this procedure with the 2 new pairs of casts to determine which one of%s" % (p1, p2)
msg += "%sthe instruments is not functioning properly.%s" % (p1, p2)
msg += "%sIf the test was run to compare an XBT cast with the last CTD cast, then it is time%s" % (p1, p2)
msg += "%sto take a new CTD cast.%s<br>" % (p1, p2)
msg += "%s<b>RESULTS</b>: PERCENT DEPTH DIFFERENCE TOO LARGE%s" % (p1, p2)
else:
msg += "%s<b>RESULTS OK.</b>%s" % (p1, p2)
msg += "%sPercent depth difference is within recommended bounds.%s<br>" % (p1, p2)
msg += "%s<b>RESULTS</b>: PERCENT DEPTH DIFFERENCE OK%s" % (p1, p2)
return msg
| lgpl-2.1 |
ShenLab/hotspot | base.py | 1 | 17881 | from __future__ import print_function
import string
import sys
from collections import deque
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
import _hmmc
from utils import normalize
decoder_algorithms = frozenset(("viterbi", "map"))
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
class ConvergenceMonitor(object):
"""Monitors and reports convergence to :data:`sys.stderr`.
Parameters
----------
thresh : double
        Convergence threshold. The algorithm has converged either if
        the maximum number of iterations is reached or if the log-probability
        improvement between two consecutive iterations is less than the
        threshold.
n_iter : int
Maximum number of iterations to perform.
verbose : bool
If ``True`` then per-iteration convergence reports are printed,
otherwise the monitor is mute.
history : deque
The log probability of the data for the last two training
iterations. If the values are not strictly increasing, the
model did not converge.
iter : int
Number of iterations performed while training the model.
"""
fmt = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
def __init__(self, thresh, n_iter, verbose):
self.thresh = thresh
self.n_iter = n_iter
self.verbose = verbose
self.history = deque(maxlen=2)
self.iter = 1
def report(self, logprob):
if self.history and self.verbose:
delta = logprob - self.history[-1]
message = self.fmt.format(
iter=self.iter, logprob=logprob, delta=delta)
print(message, file=sys.stderr)
self.history.append(logprob)
self.iter += 1
@property
def converged(self):
return (self.iter == self.n_iter or
(len(self.history) == 2 and
np.abs(self.history[1] - self.history[0]) < self.thresh))
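# Hedged usage sketch (added for clarity; not part of the original module): a
# training loop is expected to drive ConvergenceMonitor by calling report() once
# per EM iteration and polling `converged` to stop early. The function name is
# hypothetical.
def _demo_convergence_monitor(logprobs, thresh=1e-2, verbose=False):
    """Illustrative only: feed a sequence of log likelihoods to a monitor."""
    monitor = ConvergenceMonitor(thresh=thresh, n_iter=len(logprobs), verbose=verbose)
    for logprob in logprobs:
        monitor.report(logprob)
        if monitor.converged:
            break
    return monitor.iter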
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
    startprob_prior : array, shape (`n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
Decoder algorithm.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.
n_iter : int, optional
Maximum number of iterations to perform.
thresh : float, optional
Convergence threshold.
verbose : bool, optional
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
        emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
        subclass-specific emission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, verbose=False,
params=string.ascii_letters,
init_params=string.ascii_letters):
# TODO: move all validation from descriptors to 'fit' and 'predict'.
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.monitor_ = ConvergenceMonitor(thresh, n_iter, verbose)
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self.algorithm = algorithm
self.random_state = random_state
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to ``obs``
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float64).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
        decode : Find most likely state sequence corresponding to ``obs``
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self.algorithm in decoder_algorithms:
algorithm = self.algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
"""
self._init(obs, self.init_params)
for i in range(self.n_iter):
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
self.monitor_.report(curr_logprob)
if self.monitor_.converged:
break
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations <= 1:
return
lneta = np.zeros((n_observations - 1, n_components, n_components))
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lneta)
stats['trans'] += np.exp(logsumexp(lneta, axis=0))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
            # enforce transmat_prior as a lower bound on the transition probabilities
transmat_ = normalize( np.maximum(self.transmat_prior , transmat_), axis=1)
self.transmat_ = transmat_
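# Hedged sketch (added for clarity; not part of the original module): a pure-NumPy
# version of the log-space forward recursion that _do_forward_pass() delegates to
# the compiled _hmmc._forward routine. Shapes follow the class above, with
# framelogprob of shape (n_observations, n_components). The function name is
# hypothetical and this is only an illustration of the recursion.
def _demo_log_forward(log_startprob, log_transmat, framelogprob):
    """Illustrative only: return (logprob, fwdlattice) for one observation sequence."""
    n_observations, n_components = framelogprob.shape
    fwdlattice = np.zeros((n_observations, n_components))
    fwdlattice[0] = log_startprob + framelogprob[0]
    for t in range(1, n_observations):
        # sum over the previous states in log space, then add the emission term
        fwdlattice[t] = logsumexp(fwdlattice[t - 1][:, np.newaxis] + log_transmat, axis=0) + framelogprob[t]
    return logsumexp(fwdlattice[-1]), fwdlattice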
| bsd-3-clause |
datapythonista/pandas | pandas/tests/frame/test_query_eval.py | 3 | 47530 | from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.computation.check import NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
msg = (
r"'numexpr' is not installed or an unsupported version. "
r"Cannot use engine='numexpr' for query/eval if 'numexpr' is "
r"not installed"
)
with pytest.raises(ImportError, match=msg):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError, match=msg):
df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
# smaller hits python, larger hits numexpr
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
        # test ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
if op in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
elif op in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame({"A": np.random.randn(25000)})
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
def test_eval_object_dtype_binop(self):
# GH#24883
df = DataFrame({"a1": ["Y", "N"]})
res = df.eval("c = ((a1 == 'Y') & True)")
expected = DataFrame({"a1": ["Y", "N"], "c": [True, False]})
tm.assert_frame_equal(res, expected)
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b], names=["color", "food"])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(
df.index.get_level_values("color").values, index=index, name="color"
)
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, "rating"]
df = DataFrame(np.random.randn(10, 2), index=index)
res = df.query("rating == 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind == 1]
tm.assert_frame_equal(res, exp)
res = df.query("rating != 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind != 1]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
tm.assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = tm.makeCustomDataframe(
10, 3, r_idx_nlevels=2, r_idx_names=["spam", "eggs"]
)
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {
"index": df.index,
"columns": col_series,
"spam": to_series(df.index, "spam"),
"eggs": to_series(df.index, "eggs"),
"C0": col_series,
}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas:
@classmethod
def setup_class(cls):
cls.engine = "numexpr"
cls.parser = "pandas"
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query(
"@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates2"] = date_range("1/1/2013", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
return_value = df.set_index("dates1", inplace=True, drop=True)
assert return_value is None
res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.iloc[0, 0] = pd.NaT
return_value = df.set_index("dates1", inplace=True, drop=True)
assert return_value is None
res = df.query("index < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d["dates1"] = date_range("1/1/2012", periods=n)
d["dates3"] = date_range("1/1/2014", periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
return_value = df.set_index("dates1", inplace=True, drop=True)
assert return_value is None
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(
{"dates": date_range("1/1/2012", periods=n), "nondate": np.arange(n)}
)
result = df.query("dates == nondate", parser=parser, engine=engine)
assert len(result) == 0
result = df.query("dates != nondate", parser=parser, engine=engine)
tm.assert_frame_equal(result, df)
msg = r"Invalid comparison between dtype=datetime64\[ns\] and ndarray"
for op in ["<", ">", "<=", ">="]:
with pytest.raises(TypeError, match=msg):
df.query(f"dates {op} nondate", parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)})
msg = "invalid syntax"
with pytest.raises(SyntaxError, match=msg):
df.query("i - +", engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list("ab"))
a, b = 1, 2 # noqa
res = df.query("a > b", engine=engine, parser=parser)
expected = df[df.a > df.b]
tm.assert_frame_equal(res, expected)
res = df.query("@a > b", engine=engine, parser=parser)
expected = df[a > df.b]
tm.assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(
UndefinedVariableError, match="local variable 'c' is not defined"
):
df.query("@a > b > @c", engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"):
df.query("@a > b > c", engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"):
df.query("sin > 5", engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
df.index.name = "sin"
msg = "Variables in expression.+"
with pytest.raises(NumExprClobberingError, match=msg):
df.query("sin > 5", engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
tm.assert_frame_equal(
df.query("a < b", engine=engine, parser=parser), df[df.a < df.b]
)
tm.assert_frame_equal(
df.query("a + b > b * c", engine=engine, parser=parser),
df[df.a + df.b > df.b * df.c],
)
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(
np.random.randint(10, size=(10, 3)),
index=Index(range(10), name="blob"),
columns=["a", "b", "c"],
)
res = df.query("(blob < 5) & (a < b)", engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
tm.assert_frame_equal(res, expec)
res = df.query("blob < b", engine=engine, parser=parser)
expec = df[df.index < df.b]
tm.assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(
np.random.randint(10, size=(10, 3)),
index=range(10),
columns=["a", "b", "c"],
)
# "index" should refer to the index
res = df.query("index < b", engine=engine, parser=parser)
expec = df[df.index < df.b]
tm.assert_frame_equal(res, expec)
# test against a scalar
res = df.query("index < 5", engine=engine, parser=parser)
expec = df[df.index < 5]
tm.assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query("(@df > 0) & (@df2 > 0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
result = pd.eval("df[df > 0 and df2 > 0]", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
result = pd.eval(
"df[df > 0 and df2 > 0 and df[df > 0] > 0]", engine=engine, parser=parser
)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
tm.assert_frame_equal(result, expected)
result = pd.eval("df[(df>0) & (df2>0)]", engine=engine, parser=parser)
expected = df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
df.query("df > 0", engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(100, 10), columns=list("abcdefghij"))
b = 1
expect = df[df.a < b]
result = df.query("a < @b", engine=engine, parser=parser)
tm.assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query("a < b", engine=engine, parser=parser)
tm.assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list("abc")
df = DataFrame(np.random.randn(100, len(cols)), columns=cols)
res = df.query(
"a < b < c and a not in b not in c", engine=engine, parser=parser
)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
expec = df[ind]
tm.assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name="a")
b = Series(np.random.randint(10, size=15), name="b")
df = DataFrame({"a": a, "b": b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query("b - 1 in a", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name="b")
expected = df.loc[(b - 1).isin(a)]
result = df.query("@b - 1 in a", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
tm.assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list("ab"))
with pytest.raises(
UndefinedVariableError, match="local variable 'c' is not defined"
):
df.query("a == @c", engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({"index": a, "b": np.random.randn(a.size)})
df.index.name = "index"
result = df.query("index > 5", engine=self.engine, parser=self.parser)
expected = df[df["index"] > 5]
tm.assert_frame_equal(result, expected)
df = DataFrame({"index": a, "b": np.random.randn(a.size)})
result = df.query("ilevel_0 > 5", engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": a, "b": np.random.randn(a.size)})
df.index.name = "a"
result = df.query("a > 5", engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
tm.assert_frame_equal(result, expected)
result = df.query("index > 5", engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({"a": np.random.rand(n), "b": np.random.rand(n)})
df.loc[::2, 0] = np.inf
d = {"==": operator.eq, "!=": operator.ne}
for op, f in d.items():
q = f"a {op} inf"
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(result, expected)
def test_check_tz_aware_index_query(self, tz_aware_fixture):
# https://github.com/pandas-dev/pandas/issues/29463
tz = tz_aware_fixture
df_index = date_range(
start="2019-01-01", freq="1d", periods=10, tz=tz, name="time"
)
expected = DataFrame(index=df_index)
df = DataFrame(index=df_index)
result = df.query('"2018-01-03 00:00:00+00" < time')
tm.assert_frame_equal(result, expected)
expected = DataFrame(df_index)
result = df.reset_index().query('"2018-01-03 00:00:00+00" < time')
tm.assert_frame_equal(result, expected)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super().setup_class()
cls.engine = "numexpr"
cls.parser = "python"
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query(
"(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates2"] = date_range("1/1/2013", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.loc[np.random.rand(n) > 0.5, "dates3"] = pd.NaT
res = df.query(
"(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
return_value = df.set_index("dates1", inplace=True, drop=True)
assert return_value is None
res = df.query(
"(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.iloc[0, 0] = pd.NaT
return_value = df.set_index("dates1", inplace=True, drop=True)
assert return_value is None
res = df.query(
"(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser
)
expec = df[(df.index < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
return_value = df.set_index("dates1", inplace=True, drop=True)
assert return_value is None
msg = r"'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query("index < 20130101 < dates3", engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1 # noqa
result = pd.eval("x + 1", engine=engine, parser=parser)
assert result == 2
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
msg = r"The '@' prefix is only supported by the pandas parser"
with pytest.raises(SyntaxError, match=msg):
df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
df.query("(df>0) & (df2>0)", engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval("df[(df > 0) & (df2 > 0)]", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval(
"df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]", engine=engine, parser=parser
)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super().setup_class()
cls.engine = "python"
cls.parser = "pandas"
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
df.index.name = "sin"
expected = df[df.index > 5]
result = df.query("sin > 5", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setup_class(cls):
super().setup_class()
cls.engine = cls.parser = "python"
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list("abc"))
df.index.name = "sin"
expected = df[df.index > 5]
result = df.query("sin > 5", engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryStrings:
def test_str_query_method(self, parser, engine):
df = DataFrame(np.random.randn(10, 1), columns=["b"])
df["strings"] = Series(list("aabbccddee"))
expect = df[df.strings == "a"]
if parser != "pandas":
col = "strings"
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
msg = r"'(Not)?In' nodes are not implemented"
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = f"{lhs} {op} {rhs}"
with pytest.raises(NotImplementedError, match=msg):
df.query(
ex,
engine=engine,
parser=parser,
local_dict={"strings": df.strings},
)
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
tm.assert_frame_equal(res, df[df.strings.isin(["a"])])
expect = df[df.strings != "a"]
res = df.query('strings != "a"', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
tm.assert_frame_equal(res, df[~df.strings.isin(["a"])])
def test_str_list_query_method(self, parser, engine):
df = DataFrame(np.random.randn(10, 1), columns=["b"])
df["strings"] = Series(list("aabbccddee"))
expect = df[df.strings.isin(["a", "b"])]
if parser != "pandas":
col = "strings"
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
msg = r"'(Not)?In' nodes are not implemented"
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = f"{lhs} {op} {rhs}"
with pytest.raises(NotImplementedError, match=msg):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
expect = df[~df.strings.isin(["a", "b"])]
res = df.query('strings != ["a", "b"]', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine, parser=parser)
tm.assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
df = DataFrame(
{
"a": list("aaaabbbbcccc"),
"b": list("aabbccddeeff"),
"c": np.random.randint(5, size=12),
"d": np.random.randint(9, size=12),
}
)
if parser == "pandas":
res = df.query("a in b", parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
tm.assert_frame_equal(res, expec)
res = df.query("a in b and c < d", parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
tm.assert_frame_equal(res, expec)
else:
msg = r"'(Not)?In' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query("a in b", parser=parser, engine=engine)
msg = r"'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query("a in b and c < d", parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
df = DataFrame(
{
"a": list("aaaabbbbcccc"),
"b": list("aabbccddeeff"),
"c": np.random.randint(5, size=12),
"d": np.random.randint(9, size=12),
}
)
res = df.query("a == b", parser=parser, engine=engine)
exp = df[df.a == df.b]
tm.assert_frame_equal(res, exp)
res = df.query("a != b", parser=parser, engine=engine)
exp = df[df.a != df.b]
tm.assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
skip_if_no_pandas_parser(parser)
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(
StringIO(raw), sep=r"\s{2,}", engine="python", parse_dates=["timestamp"]
)
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine)
tm.assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame({"a": ["a", "b", "test & test"], "b": [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == "test & test"]
tm.assert_frame_equal(res, expec)
def test_query_lex_compare_strings(self, parser, engine):
a = Series(np.random.choice(list("abcde"), 20))
b = Series(np.arange(a.size))
df = DataFrame({"X": a, "Y": b})
ops = {"<": operator.lt, ">": operator.gt, "<=": operator.le, ">=": operator.ge}
for op, func in ops.items():
res = df.query(f'X {op} "d"', engine=engine, parser=parser)
expected = df[func(df.X, "d")]
tm.assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
columns = "bid", "bidsize", "ask", "asksize"
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query("bid & ask", engine=engine, parser=parser)
expected = df[df.bid & df.ask]
tm.assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame(
{
"Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"],
"Price": [109.70, 109.72, 183.30, 183.35],
}
)
e = df[df.Symbol == "BUD US"]
symb = "BUD US" # noqa
r = df.query("Symbol == @symb", parser=parser, engine=engine)
tm.assert_frame_equal(e, r)
class TestDataFrameEvalWithFrame:
def setup_method(self, method):
self.frame = DataFrame(np.random.randn(10, 3), columns=list("abc"))
def teardown_method(self, method):
del self.frame
def test_simple_expr(self, parser, engine):
res = self.frame.eval("a + b", engine=engine, parser=parser)
expect = self.frame.a + self.frame.b
tm.assert_series_equal(res, expect)
def test_bool_arith_expr(self, parser, engine):
res = self.frame.eval("a[a < 1] + b", engine=engine, parser=parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
tm.assert_series_equal(res, expect)
@pytest.mark.parametrize("op", ["+", "-", "*", "/"])
def test_invalid_type_for_operator_raises(self, parser, engine, op):
df = DataFrame({"a": [1, 2], "b": ["c", "d"]})
msg = r"unsupported operand type\(s\) for .+: '.+' and '.+'"
with pytest.raises(TypeError, match=msg):
df.eval(f"a {op} b", engine=engine, parser=parser)
class TestDataFrameQueryBacktickQuoting:
@pytest.fixture(scope="class")
def df(self):
"""
Yields a dataframe with strings that may or may not need escaping
        by backticks. Some of the column names cannot be escaped by backticks
        and should raise a SyntaxError when queried.
"""
yield DataFrame(
{
"A": [1, 2, 3],
"B B": [3, 2, 1],
"C C": [4, 5, 6],
"C C": [7, 4, 3],
"C_C": [8, 9, 10],
"D_D D": [11, 1, 101],
"E.E": [6, 3, 5],
"F-F": [8, 1, 10],
"1e1": [2, 4, 8],
"def": [10, 11, 2],
"A (x)": [4, 1, 3],
"B(x)": [1, 1, 5],
"B (x)": [2, 7, 4],
" &^ :!€$?(} > <++*'' ": [2, 5, 6],
"": [10, 11, 1],
" A": [4, 7, 9],
" ": [1, 2, 1],
"it's": [6, 3, 1],
"that's": [9, 1, 8],
"☺": [8, 7, 6],
"foo#bar": [2, 4, 5],
1: [5, 7, 9],
}
)
def test_single_backtick_variable_query(self, df):
res = df.query("1 < `B B`")
expect = df[1 < df["B B"]]
tm.assert_frame_equal(res, expect)
def test_two_backtick_variables_query(self, df):
res = df.query("1 < `B B` and 4 < `C C`")
expect = df[(1 < df["B B"]) & (4 < df["C C"])]
tm.assert_frame_equal(res, expect)
def test_single_backtick_variable_expr(self, df):
res = df.eval("A + `B B`")
expect = df["A"] + df["B B"]
tm.assert_series_equal(res, expect)
def test_two_backtick_variables_expr(self, df):
res = df.eval("`B B` + `C C`")
expect = df["B B"] + df["C C"]
tm.assert_series_equal(res, expect)
def test_already_underscore_variable(self, df):
res = df.eval("`C_C` + A")
expect = df["C_C"] + df["A"]
tm.assert_series_equal(res, expect)
def test_same_name_but_underscores(self, df):
res = df.eval("C_C + `C C`")
expect = df["C_C"] + df["C C"]
tm.assert_series_equal(res, expect)
def test_mixed_underscores_and_spaces(self, df):
res = df.eval("A + `D_D D`")
expect = df["A"] + df["D_D D"]
tm.assert_series_equal(res, expect)
def test_backtick_quote_name_with_no_spaces(self, df):
res = df.eval("A + `C_C`")
expect = df["A"] + df["C_C"]
tm.assert_series_equal(res, expect)
def test_special_characters(self, df):
res = df.eval("`E.E` + `F-F` - A")
expect = df["E.E"] + df["F-F"] - df["A"]
tm.assert_series_equal(res, expect)
def test_start_with_digit(self, df):
res = df.eval("A + `1e1`")
expect = df["A"] + df["1e1"]
tm.assert_series_equal(res, expect)
def test_keyword(self, df):
res = df.eval("A + `def`")
expect = df["A"] + df["def"]
tm.assert_series_equal(res, expect)
def test_unneeded_quoting(self, df):
res = df.query("`A` > 2")
expect = df[df["A"] > 2]
tm.assert_frame_equal(res, expect)
def test_parenthesis(self, df):
res = df.query("`A (x)` > 2")
expect = df[df["A (x)"] > 2]
tm.assert_frame_equal(res, expect)
def test_empty_string(self, df):
res = df.query("`` > 5")
expect = df[df[""] > 5]
tm.assert_frame_equal(res, expect)
def test_multiple_spaces(self, df):
res = df.query("`C C` > 5")
expect = df[df["C C"] > 5]
tm.assert_frame_equal(res, expect)
def test_start_with_spaces(self, df):
res = df.eval("` A` + ` `")
expect = df[" A"] + df[" "]
tm.assert_series_equal(res, expect)
def test_lots_of_operators_string(self, df):
res = df.query("` &^ :!€$?(} > <++*'' ` > 4")
expect = df[df[" &^ :!€$?(} > <++*'' "] > 4]
tm.assert_frame_equal(res, expect)
def test_missing_attribute(self, df):
message = "module 'pandas' has no attribute 'thing'"
with pytest.raises(AttributeError, match=message):
df.eval("@pd.thing")
def test_failing_quote(self, df):
msg = r"(Could not convert ).*( to a valid Python identifier.)"
with pytest.raises(SyntaxError, match=msg):
df.query("`it's` > `that's`")
def test_failing_character_outside_range(self, df):
msg = r"(Could not convert ).*( to a valid Python identifier.)"
with pytest.raises(SyntaxError, match=msg):
df.query("`☺` > 4")
def test_failing_hashtag(self, df):
msg = "Failed to parse backticks"
with pytest.raises(SyntaxError, match=msg):
df.query("`foo#bar` > 4")
def test_call_non_named_expression(self, df):
"""
Only attributes and variables ('named functions') can be called.
.__call__() is not an allowed attribute because that would allow
calling anything.
https://github.com/pandas-dev/pandas/pull/32460
"""
def func(*_):
return 1
funcs = [func] # noqa
df.eval("@func()")
with pytest.raises(TypeError, match="Only named functions are supported"):
df.eval("@funcs[0]()")
with pytest.raises(TypeError, match="Only named functions are supported"):
df.eval("@funcs[0].__call__()")
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 43 | 10272 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1; this means the
    # reconstruction worked as expected.
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that partial_fit raises if n_components is changed via set_params
    # after fitting.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features between fits raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=10, random_state=rng)
pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
np.linalg.norm(X_ipca, "fro")**2.0, 2)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(ipca.singular_values_,
np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=3, random_state=rng)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
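    # After the normalisation above the columns of X_pca are orthonormal and the
    # rows of pca.components_ are orthonormal, so the singular values of X_hat
    # are exactly the scales 3.142, 2.718 and 1.0 asserted below.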
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
simvisage/oricreate | docs/howtos/ex082_node_dist/sim01_node_dist.py | 1 | 3101 | r'''
Fold the Miura ori crease pattern using psi control
---------------------------------------------------
'''
import numpy as np
from oricreate.api import \
CreasePatternState, SimulationTask, SimulationConfig, \
FuNodeDist, GuDofConstraints, fix, \
FTV, FTA
from oricreate.fu import \
FuTargetPsiValue
from oricreate.gu import \
GuConstantLength, GuPsiConstraints
from oricreate.hu import \
HuPsiConstraints
def create_cp_factory():
# begin
from oricreate.api import CustomCPFactory
cp = CreasePatternState(X=[[-0.5, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[2, 1, 0],
[1, 2, 0]],
L=[[0, 1],
[0, 2],
[1, 3],
[2, 3],
[0, 3],
[1, 4],
[3, 4],
[2, 5],
[3, 5]],
F=[[0, 1, 3],
[0, 3, 2],
[1, 4, 3],
[2, 3, 5]
])
cp_factory = CustomCPFactory(formed_object=cp)
# end
return cp_factory
if __name__ == '__main__':
cpf = create_cp_factory()
cp = cpf.formed_object
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
cp.plot_mpl(ax, facets=True)
plt.tight_layout()
plt.show()
F_u_fix = cp.F_N[0]
dof_constraints = fix([F_u_fix[0]], [0, 1, 2]) + \
fix([F_u_fix[1]], [1, 2]) + \
fix([F_u_fix[2]], [2])
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
gu_constant_length = GuConstantLength()
fu_node_dist = \
FuNodeDist(forming_task=cpf,
L=[[4, 5]])
sim_config = SimulationConfig(goal_function_type='total potential energy',
gu={'cl': gu_constant_length,
'gu': gu_dof_constraints},
acc=1e-5, MAX_ITER=100)
sim_config._fu = fu_node_dist
sim_task = SimulationTask(previous_task=cpf,
config=sim_config,
n_steps=5)
cp = sim_task.formed_object
cp.u[(4, 5), 2] = 0.1
sim_task.u_1
ftv = FTV()
sim_task.sim_history.viz3d['cp'].set(tube_radius=0.005)
ftv.add(sim_task.sim_history.viz3d['cp'])
ftv.plot()
ftv.configure_traits()
fta = FTA(ftv=ftv)
fta.init_view(a=200, e=35, d=50, f=(0, 0, 0), r=0)
fta.add_cam_move(a=200, e=34, n=5, d=50, r=0,
duration=10,
vot_fn=lambda cmt: np.linspace(0, 1, 4),
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
fta.plot()
fta.render()
fta.configure_traits()
| gpl-3.0 |
ahoyosid/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
                 (2  0)
    k(x, y) = x  (    ) y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
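# With M = diag(2, 1) this is simply a linear kernel that weights the first
# feature twice as heavily as the second: k(x, y) = 2 * x1 * y1 + x2 * y2.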
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rohangoel96/IRCLogParser | setup.py | 1 | 1360 | from setuptools import setup, find_packages
from os import path
from codecs import open
import numpy
import scipy
current_path = path.abspath(path.dirname(__file__))
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
setup(
name='IRCLogParser',
version='1.0',
description='A Parser for IRC chat Logs',
long_description=long_description,
url='https://github.com/prasadtalasila/IRCLogParser',
download_url='https://github.com/prasadtalasila/IRCLogParser/archive/v1.0.1.tar.gz',
author='Prasad Talasila',
author_email='tsrkp@goa.bits-pilani.ac.in',
license='MIT',
keywords='IRC parser data-analysis research development',
packages=['IRCLogParser'],
install_requires=[
'scipy',
'numpy',
'networkx',
'matplotlib',
'pygraphviz',
'scikit-learn',
'pandas',
'python-igraph',
'sphinx',
'pyyaml',
't3SphinxThemeRtd',
't3fieldlisttable',
't3tablerows',
't3targets',
'sphinxcontrib-googlechart',
'sphinxcontrib-googlemaps',
'sphinxcontrib-httpdomain',
'sphinxcontrib-slide',
'sphinxcontrib.youtube',
'nltk',
'plotly',
'ddt'
],
)
| mit |
jwiggins/scikit-image | doc/examples/edges/plot_medial_transform.py | 11 | 2257 | """
===========================
Medial axis skeletonization
===========================
The medial axis of an object is the set of all points having more than one
closest point on the object's boundary. It is often called the **topological
skeleton**, because it is a 1-pixel wide skeleton of the object, with the same
connectivity as the original object.
Here, we use the medial axis transform to compute the width of the foreground
objects. As the function ``medial_axis`` (``skimage.morphology.medial_axis``)
returns the distance transform in addition to the medial axis (with the keyword
argument ``return_distance=True``), it is possible to compute the distance to
the background for all points of the medial axis with this function. This gives
an estimate of the local width of the objects.
For a skeleton with fewer branches, there exists another skeletonization
algorithm in ``skimage``: ``skimage.morphology.skeletonize``, that computes
a skeleton by iterative morphological thinnings.
"""
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import medial_axis
import matplotlib.pyplot as plt
def microstructure(l=256):
"""
Synthetic binary data: binary microstructure with blobs.
Parameters
----------
l: int, optional
linear size of the returned image
"""
n = 5
x, y = np.ogrid[0:l, 0:l]
mask = np.zeros((l, l))
generator = np.random.RandomState(1)
points = l * generator.rand(2, n**2)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndi.gaussian_filter(mask, sigma=l/(4.*n))
return mask > mask.mean()
data = microstructure(l=64)
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
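# The nonzero entries of dist_on_skel are the distances from skeleton pixels to
# the nearest background pixel, so twice these values estimate the local width
# of the foreground objects, as described in the module docstring.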
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax1.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax1.axis('off')
ax2.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest')
ax2.contour(data, [0.5], colors='w')
ax2.axis('off')
fig.tight_layout()
plt.show()
| bsd-3-clause |
anthrotype/freetype-py | examples/glyph-color.py | 3 | 2979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Glyph colored rendering (with outline)
'''
from freetype import *
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
face = Face('./Vera.ttf')
face.set_char_size( 96*64 )
RGBA = [('R',float), ('G',float), ('B',float), ('A',float)]
# Outline
flags = FT_LOAD_DEFAULT | FT_LOAD_NO_BITMAP
face.load_char('S', flags )
slot = face.glyph
glyph = slot.get_glyph()
stroker = Stroker( )
stroker.set(64, FT_STROKER_LINECAP_ROUND, FT_STROKER_LINEJOIN_ROUND, 0 )
glyph.stroke( stroker )
blyph = glyph.to_bitmap(FT_RENDER_MODE_NORMAL, Vector(0,0))
bitmap = blyph.bitmap
width, rows, pitch = bitmap.width, bitmap.rows, bitmap.pitch
top, left = blyph.top, blyph.left
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = np.array(data).reshape(rows, width)/255.0
O = np.zeros((rows,width), dtype=RGBA)
O['A'] = Z
O['R'] = 1
O['G'] = 0
O['B'] = 0
# Plain
flags = FT_LOAD_RENDER
face.load_char('S', flags)
F = np.zeros((rows,width), dtype=RGBA)
Z = np.zeros((rows, width))
bitmap = face.glyph.bitmap
width, rows, pitch = bitmap.width, bitmap.rows, bitmap.pitch
top, left = face.glyph.bitmap_top, face.glyph.bitmap_left
dy = blyph.top - face.glyph.bitmap_top
dx = face.glyph.bitmap_left - blyph.left
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z[dx:dx+rows,dy:dy+width] = np.array(data).reshape(rows, width)/255.
F['R'] = 1
F['G'] = 1
F['B'] = 0
F['A'] = Z
# Combine outline and plain
R1,G1,B1,A1 = O['R'],O['G'],O['B'],O['A']
R2,G2,B2,A2 = F['R'],F['G'],F['B'],F['A']
Z = np.zeros(O.shape, dtype=RGBA)
Z['R'] = (A1 * R1 + A2 * (1 - A1) * R2)
Z['G'] = (A1 * G1 + A2 * (1 - A1) * G2)
Z['B'] = (A1 * B1 + A2 * (1 - A1) * B2)
Z['A'] = (A1 + A2 * (1 - A1))
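    # These are the usual non-premultiplied "over" compositing equations (without
    # the final division by the composite alpha): layer 1, the red outline, is
    # drawn over layer 2, the yellow fill.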
# Draw
plt.figure(figsize=(12,5))
plt.subplot(1,3,1)
plt.title('Plain')
plt.xticks([]), plt.yticks([])
I = F.view(dtype=float).reshape(O.shape[0],O.shape[1],4)
plt.imshow(I, interpolation='nearest', origin='lower')
plt.subplot(1,3,2)
plt.title('Outline')
plt.xticks([]), plt.yticks([])
I = O.view(dtype=float).reshape(O.shape[0],O.shape[1],4)
plt.imshow(I, interpolation='nearest', origin='lower')
plt.subplot(1,3,3)
plt.title('Outline + Plain')
plt.xticks([]), plt.yticks([])
I = Z.view(dtype=float).reshape(O.shape[0],O.shape[1],4)
plt.imshow(I, interpolation='nearest', origin='lower')
plt.show()
| bsd-3-clause |
alivecor/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
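  # The band plotted later is mean +/- one standard deviation
  # (square root of the predictive variance).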
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
sanketloke/scikit-learn | examples/plot_digits_pipe.py | 70 | 1813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
OpenBookProjects/ipynb | XKCD-style/double_pendulum_xkcd.py | 1 | 5622 | """
Double pendulum animation drawn in XKCD sketch style.
adapted from code at http://matplotlib.sourceforge.net/examples/animation/double_pendulum_animated.py
Double pendulum formula translated from the C code at
http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
author: Jake Vanderplas
email: vanderplas@astro.washington.edu
website: http://jakevdp.github.com
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
plt.xkcd() # XKCD-style requires matplotlib 1.3+
class DoublePendulum:
"""Double Pendulum Class
init_state is [theta1, omega1, theta2, omega2] in degrees,
where theta1, omega1 is the angular position and velocity of the first
pendulum arm, and theta2, omega2 is that of the second pendulum arm
"""
def __init__(self,
init_state = [120, 0, -20, 0],
L1=1.0, # length of pendulum 1 in m
L2=1.0, # length of pendulum 2 in m
M1=1.0, # mass of pendulum 1 in kg
M2=1.0, # mass of pendulum 2 in kg
G=9.8, # acceleration due to gravity, in m/s^2
origin=(0, 0)):
self.init_state = np.asarray(init_state, dtype='float')
self.params = (L1, L2, M1, M2, G)
self.origin = origin
self.time_elapsed = 0
self.state = self.init_state * np.pi / 180.
def position(self):
"""compute the current x,y positions of the pendulum arms"""
(L1, L2, M1, M2, G) = self.params
x = np.cumsum([self.origin[0],
L1 * sin(self.state[0]),
L2 * sin(self.state[2])])
y = np.cumsum([self.origin[1],
-L1 * cos(self.state[0]),
-L2 * cos(self.state[2])])
return (x, y)
def energy(self):
"""compute the energy of the current state"""
(L1, L2, M1, M2, G) = self.params
x = np.cumsum([L1 * sin(self.state[0]),
L2 * sin(self.state[2])])
y = np.cumsum([-L1 * cos(self.state[0]),
-L2 * cos(self.state[2])])
vx = np.cumsum([L1 * self.state[1] * cos(self.state[0]),
L2 * self.state[3] * cos(self.state[2])])
vy = np.cumsum([L1 * self.state[1] * sin(self.state[0]),
L2 * self.state[3] * sin(self.state[2])])
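        # U: gravitational potential energy of both masses,
        # K: their total kinetic energy.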
U = G * (M1 * y[0] + M2 * y[1])
K = 0.5 * (M1 * np.dot(vx, vx) + M2 * np.dot(vy, vy))
return U + K
def dstate_dt(self, state, t):
"""compute the derivative of the given state"""
(M1, M2, L1, L2, G) = self.params
dydx = np.zeros_like(state)
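        # state layout: [theta1, omega1, theta2, omega2]; the expressions below
        # are the standard coupled equations of motion for a planar double
        # pendulum (see the C source referenced in the module docstring).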
dydx[0] = state[1]
dydx[2] = state[3]
cos_delta = cos(state[2] - state[0])
sin_delta = sin(state[2] - state[0])
den1 = (M1 + M2) * L1 - M2 * L1 * cos_delta * cos_delta
dydx[1] = (M2 * L1 * state[1] * state[1] * sin_delta * cos_delta
+ M2 * G * sin(state[2]) * cos_delta
+ M2 * L2 * state[3] * state[3] * sin_delta
- (M1 + M2) * G * sin(state[0])) / den1
den2 = (L2 / L1) * den1
dydx[3] = (-M2 * L2 * state[3] * state[3] * sin_delta * cos_delta
+ (M1 + M2) * G * sin(state[0]) * cos_delta
- (M1 + M2) * L1 * state[1] * state[1] * sin_delta
- (M1 + M2) * G * sin(state[2])) / den2
return dydx
def step(self, dt):
"""execute one time step of length dt and update state"""
self.state = integrate.odeint(self.dstate_dt, self.state, [0, dt])[1]
self.time_elapsed += dt
#------------------------------------------------------------
# set up initial state and global variables
pendulum = DoublePendulum([180., 0.0, -20., 0.0])
dt = 1./30 # 30 fps
#------------------------------------------------------------
# set up figure and animation
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(-2, 2), ylim=(-2, 2))
ax.grid()
line, = ax.plot([], [], 'o-', lw=2)
time_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
energy_text = ax.text(0.02, 0.90, '', transform=ax.transAxes)
def init():
"""initialize animation"""
line.set_data([], [])
time_text.set_text('')
energy_text.set_text('')
return line, time_text, energy_text
def animate(i):
"""perform animation step"""
global pendulum, dt
pendulum.step(dt)
line.set_data(*pendulum.position())
time_text.set_text('time = %.1f' % pendulum.time_elapsed)
energy_text.set_text('energy = %.3f J' % pendulum.energy())
return line, time_text, energy_text
# choose the interval based on dt and the time to animate one step
from time import time
t0 = time()
animate(0)
t1 = time()
interval = 1000 * dt - (t1 - t0)
ani = animation.FuncAnimation(fig, animate, frames=300,
interval=interval, blit=True, init_func=init)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#ani.save('double_pendulum_xkcd.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show()
| mit |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part05-e06_suicide_fractions/test/test_suicide_fractions.py | 1 | 2566 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch, MagicMock
import pandas as pd
from tmc import points
from tmc.utils import load, get_out, patch_helper, spy_decorator
module_name="src.suicide_fractions"
suicide_fractions = load(module_name, "suicide_fractions")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p05-06.1')
class SuicideFractions(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# cls.s = suicide_fractions()
def setUp(self):
self.s = suicide_fractions()
def test_shape(self):
        self.assertEqual(self.s.shape, (141,), msg="The returned Series has incorrect shape!")
def test_type(self):
self.assertIsInstance(self.s, pd.Series, msg="You should return a Series object!")
self.assertEqual(self.s.dtype, float, msg="The dtype of Series should be float!")
def test_index(self):
ind = ["Albania", "Anguilla", "Antigua and Barbuda", "Argentina", "Armenia"]
self.assertCountEqual(self.s.index[:5], ind, msg="First five indices were incorrect!")
def test_nulls(self):
nulls = self.s.isnull().sum()
self.assertEqual(nulls, 23, msg="Wrong number of missing values in the Series!")
def test_content(self):
self.assertAlmostEqual(self.s["Albania"], 0.000035, places=6, msg="Incorrect mean suicide fraction for Albania!")
self.assertAlmostEqual(self.s["Belgium"], 0.000222, places=6, msg="Incorrect mean suicide fraction for Belgium!")
self.assertAlmostEqual(self.s["Finland"], 0.000228, places=6, msg="Incorrect mean suicide fraction for Finland!")
def test_calls(self):
method = spy_decorator(pd.core.frame.DataFrame.groupby, "groupby")
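        # spy_decorator comes from the course's tmc.utils helpers; it presumably
        # wraps DataFrame.groupby with a mock-backed spy so the call can be
        # asserted below while keeping the original behaviour.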
with patch(ph("suicide_fractions"), wraps=suicide_fractions) as psf,\
patch.object(pd.core.frame.DataFrame, "groupby", new=method) as pgroupby,\
patch(ph("pd.read_csv"), wraps=pd.read_csv) as prc:
main()
psf.assert_called_once()
prc.assert_called_once()
method.mock.assert_called_once()
args, kwargs = method.mock.call_args
correct = ((len(args) > 0 and args[0]== "country") or
("by" in kwargs and kwargs["by"] == "country"))
self.assertTrue(correct, msg="Wrong or missing argument to groupby method!")
#self.assertEqual(args[0], "country", msg="Wrong argument to groupby method!")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
jzbontar/orange-tree | Orange/widgets/data/owdatasampler.py | 1 | 8647 | import sys
import math
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
import numpy as np
import sklearn.cross_validation as skl_cross_validation
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
from Orange.data.table import Table
from Orange.data import DiscreteVariable
class OWDataSampler(widget.OWWidget):
name = "Data Sampler"
description = "Selects a subset of instances from the data set."
icon = "icons/DataSampler.svg"
priority = 100
category = "Data"
keywords = ["data", "sample"]
inputs = [("Data", Table, "set_data")]
outputs = [("Data Sample", Table), ("Remaining Data", Table)]
want_main_area = False
RandomSeed = 42
FixedSize, FixedProportion, CrossValidation = range(3)
use_seed = Setting(False)
replacement = Setting(False)
stratify = Setting(False)
sampling_type = Setting(0)
sampleSizeNumber = Setting(1)
sampleSizePercentage = Setting(70)
number_of_folds = Setting(10)
selectedFold = Setting(1)
def __init__(self):
super().__init__()
self.data = None
self.indices = None
box = gui.widgetBox(self.controlArea, "Information")
self.dataInfoLabel = gui.widgetLabel(box, 'No data on input.')
self.outputInfoLabel = gui.widgetLabel(box, ' ')
box = gui.widgetBox(self.controlArea, "Sampling Type")
sampling = gui.radioButtons(
box, self, "sampling_type", callback=self.sampling_type_changed)
def set_sampling_type(i):
def f():
self.sampling_type = i
self.sampling_type_changed()
return f
gui.appendRadioButton(sampling, "Fixed sample size:")
ibox = gui.indentedBox(sampling)
self.sampleSizeSpin = gui.spin(
ibox, self, "sampleSizeNumber", label="Instances: ",
minv=1, maxv=2 ** 31 - 1,
callback=set_sampling_type(self.FixedSize))
gui.checkBox(
ibox, self, "replacement", "Sample with replacement",
callback=set_sampling_type(self.FixedSize))
gui.separator(sampling, 12)
gui.appendRadioButton(sampling, "Fixed proportion of data:")
self.sampleSizePercentageSlider = gui.hSlider(
gui.indentedBox(sampling), self,
"sampleSizePercentage",
minValue=0, maxValue=100, ticks=10, labelFormat="%d %%",
callback=set_sampling_type(self.FixedProportion))
gui.separator(sampling, 12)
gui.appendRadioButton(sampling, "Cross Validation:")
form = QtGui.QFormLayout(
formAlignment=Qt.AlignLeft | Qt.AlignTop,
labelAlignment=Qt.AlignLeft,
fieldGrowthPolicy=QtGui.QFormLayout.AllNonFixedFieldsGrow)
ibox = gui.indentedBox(sampling, addSpace=True, orientation=form)
form.addRow("Number of folds",
gui.spin(
ibox, self, "number_of_folds", 2, 100,
addToLayout=False,
callback=self.number_of_folds_changed))
self.selected_fold_spin = gui.spin(
ibox, self, "selectedFold", 1, 100, addToLayout=False,
callback=self.fold_changed)
form.addRow("Selected fold", self.selected_fold_spin)
box = gui.widgetBox(self.controlArea, "Options")
gui.checkBox(box, self, "use_seed",
"Replicable (deterministic) sampling",
callback=self.settings_changed)
gui.checkBox(box, self, "stratify",
"Stratify sample (when possible)",
callback=self.settings_changed)
gui.button(self.controlArea, self, "Sample Data",
callback=self.commit)
self.layout().setSizeConstraint(QtGui.QLayout.SetFixedSize)
def sampling_type_changed(self):
self.settings_changed()
def number_of_folds_changed(self):
self.selected_fold_spin.setMaximum(self.number_of_folds)
self.sampling_type = self.CrossValidation
self.settings_changed()
def fold_changed(self):
# a separate callback - if we decide to cache indices
self.sampling_type = self.CrossValidation
def settings_changed(self):
self.indices = None
def set_data(self, dataset):
self.data = dataset
if dataset is not None:
self.dataInfoLabel.setText(
'%d instances in input data set.' % len(dataset))
self.sampleSizeSpin.setMaximum(len(dataset))
self.updateindices()
else:
self.dataInfoLabel.setText('No data on input.')
self.outputInfoLabel.setText('')
self.indices = None
self.commit()
def commit(self):
if self.data is None:
sample = other = None
self.outputInfoLabel.setText("")
else:
if self.indices is None or not self.use_seed:
self.updateindices()
if self.sampling_type in [self.FixedProportion, self.FixedSize]:
remaining, sample = self.indices
self.outputInfoLabel.setText(
'Outputting %d instances.' % len(sample))
else:
remaining, sample = self.indices[self.selectedFold - 1]
self.outputInfoLabel.setText(
'Outputting fold %d, %d instances.' %
(self.selectedFold, len(sample))
)
sample = self.data[sample]
other = self.data[remaining]
self.send("Data Sample", sample)
self.send("Remaining Data", other)
def updateindices(self):
rnd = self.RandomSeed if self.use_seed else None
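        # Stratified sampling only applies to a Table with a discrete class
        # variable.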
stratified = (self.stratify and type(self.data) == Table
and is_discrete(self.data.domain.class_var))
if self.sampling_type == self.FixedSize:
self.indices = sample_random_n(
self.data, self.sampleSizeNumber,
stratified=stratified, replace=self.replacement,
random_state=rnd)
elif self.sampling_type == self.FixedProportion:
self.indices = sample_random_p(
self.data, self.sampleSizePercentage / 100,
stratified=stratified, random_state=rnd)
else:
self.indices = sample_fold_indices(
self.data, self.number_of_folds, stratified=stratified,
random_state=rnd)
def is_discrete(var):
return isinstance(var, DiscreteVariable)
def sample_fold_indices(table, folds=10, stratified=False, random_state=None):
"""
:param Orange.data.Table table:
:param int folds: Number of folds
:param bool stratified: Return stratified indices (if applicable).
:param Random random_state:
:rval tuple-of-arrays: A tuple of array indices one for each fold.
"""
if stratified and is_discrete(table.domain.class_var):
# XXX: StratifiedKFold does not support random_state
ind = skl_cross_validation.StratifiedKFold(
table.Y.ravel(), folds, random_state=random_state)
else:
ind = skl_cross_validation.KFold(
len(table), folds, shuffle=True, random_state=random_state)
return tuple(ind)
def sample_random_n(table, n, stratified=False, replace=False,
random_state=None):
if replace:
if random_state is None:
rgen = np.random
else:
rgen = np.random.mtrand.RandomState(random_state)
sample = rgen.random_integers(0, len(table) - 1, n)
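        # Mark the sampled rows in a mask; the un-sampled rows (the complement)
        # are returned as the remainder.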
o = np.ones(len(table))
o[sample] = 0
others = np.nonzero(o)[0]
return others, sample
if stratified and is_discrete(table.domain.class_var):
test_size = max(len(table.domain.class_var.values), n)
ind = skl_cross_validation.StratifiedShuffleSplit(
table.Y.ravel(), n_iter=1,
test_size=test_size, train_size=len(table) - test_size,
random_state=random_state)
else:
ind = skl_cross_validation.ShuffleSplit(
len(table), n_iter=1,
test_size=n, random_state=random_state)
return next(iter(ind))
def sample_random_p(table, p, stratified=False, random_state=None):
n = int(math.ceil(len(table) * p))
return sample_random_n(table, n, stratified, False, random_state)
def test_main():
app = QtGui.QApplication([])
data = Table("iris")
w = OWDataSampler()
w.set_data(data)
w.show()
return app.exec_()
if __name__ == "__main__":
sys.exit(test_main())
| gpl-3.0 |
nuclear-wizard/moose | python/peacock/tests/postprocessor_tab/test_FigurePlugin.py | 12 | 3320 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
from PyQt5 import QtWidgets
from peacock.PostprocessorViewer.plugins.FigurePlugin import main
from peacock.utils import Testing
import mooseutils
class TestFigurePlugin(Testing.PeacockImageTestCase):
"""
Test class for FigureWidget.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates the GUI.
"""
# Read some data
filename = '../input/white_elephant_jan_2016.csv'
self._reader = mooseutils.PostprocessorReader(filename)
# Create the widget with FigurePlugin only
self._widget = main()
self._window = self._widget.currentWidget().FigurePlugin
def testEmpty(self):
"""
Test that an empty plot with two projection options gets created.
"""
self._window.draw()
self.assertImage('testEmpty.png')
def testPlotLeft(self):
"""
Draws on left axis.
"""
ax = self._window.axes()[0]
ax.plot(self._reader['air_temp_low_24_hour_set_1'], '-b')
self._window.draw()
self.assertImage('testPlotLeft.png')
def testPlotRight(self):
"""
Draws right axis.
"""
ax = self._window.axes()[1]
ax.plot(self._reader['air_temp_high_24_hour_set_1'], '-r')
self._window.draw()
self.assertImage('testPlotRight.png')
def testPlotDual(self):
"""
Draws on both.
"""
ax = self._window.axes()[0]
ax.plot(self._reader['air_temp_low_24_hour_set_1'], '-b')
ax = self._window.axes()[1]
ax.plot(self._reader['air_temp_high_24_hour_set_1'], '-r')
self._window.draw()
self.assertImage('testPlotDual.png')
def testClear(self):
"""
Test that a plot can be created and cleared.
"""
ax = self._window.axes()[0]
ax.plot(self._reader['snow_water_equiv_set_1'], '-b')
self._window.draw()
self.assertImage('testClearPlot.png')
ax.clear()
self._window.draw()
self.assertImage('testEmpty.png')
def testRepr(self):
"""
Test the "repr" script output.
"""
output, imports = self._window.repr()
self.assertIn('import matplotlib.pyplot as plt', imports)
self.assertIn("figure = plt.figure(facecolor='white')", output)
self.assertIn('axes0 = figure.add_subplot(111)', output)
# This only appears if data exists on axes2
ax1 = 'axes1 = axes0.twinx()'
self.assertNotIn(ax1, output)
# Plot data on right and make sure axes1 appears
ax = self._window.axes()[1]
ax.plot(self._reader['air_temp_high_24_hour_set_1'], '-r')
output, imports = self._window.repr()
self.assertIn(ax1, output)
if __name__ == '__main__':
import unittest
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/series/test_analytics.py | 7 | 66035 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from itertools import product
from distutils.version import LooseVersion
import nose
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, isnull, notnull, bdate_range,
date_range, _np_version_under1p10)
from pandas.core.index import MultiIndex
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.config as cf
import pandas.core.nanops as nanops
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAnalytics(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_sum_zero(self):
arr = np.array([])
self.assertEqual(nanops.nansum(arr), 0)
arr = np.empty((10, 0))
self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())
# GH #844
s = Series([], index=[])
self.assertEqual(s.sum(), 0)
df = DataFrame(np.empty((10, 0)))
self.assertTrue((df.sum(1) == 0).all())
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
def test_overflow(self):
# GH 6915
# overflowing on the smaller int dtypes
for dtype in ['int32', 'int64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertEqual(int(result), v.sum(dtype='int64'))
result = s.min(skipna=False)
self.assertEqual(int(result), 0)
result = s.max(skipna=False)
self.assertEqual(int(result), v[-1])
# use bottleneck if available
result = s.sum()
self.assertEqual(int(result), v.sum(dtype='int64'))
result = s.min()
self.assertEqual(int(result), 0)
result = s.max()
self.assertEqual(int(result), v[-1])
for dtype in ['float32', 'float64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertEqual(result, v.sum(dtype=dtype))
result = s.min(skipna=False)
self.assertTrue(np.allclose(float(result), 0.0))
result = s.max(skipna=False)
self.assertTrue(np.allclose(float(result), v[-1]))
# use bottleneck if available
result = s.sum()
self.assertEqual(result, v.sum(dtype=dtype))
result = s.min()
self.assertTrue(np.allclose(float(result), 0.0))
result = s.max()
self.assertTrue(np.allclose(float(result), v[-1]))
def test_sum(self):
self._check_stat_op('sum', np.sum, check_allna=True)
def test_sum_inf(self):
import pandas.core.nanops as nanops
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
self.assertTrue(np.isinf(s.sum()))
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
with cf.option_context("mode.use_inf_as_null", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
self.assertTrue(np.isinf(res).all())
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_mode(self):
s = Series([12, 12, 11, 10, 19, 11])
exp = Series([11, 12])
assert_series_equal(s.mode(), exp)
assert_series_equal(
Series([1, 2, 3]).mode(), Series(
[], dtype='int64'))
lst = [5] * 20 + [1] * 10 + [6] * 25
np.random.shuffle(lst)
s = Series(lst)
assert_series_equal(s.mode(), Series([6]))
s = Series([5] * 10)
assert_series_equal(s.mode(), Series([5]))
s = Series(lst)
s[0] = np.nan
assert_series_equal(s.mode(), Series([6.]))
s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))
assert_series_equal(s.mode(), Series(['e']))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series([], dtype="M8[ns]"))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',
'2013-01-02'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],
dtype='M8[ns]'))
# GH 5986
s = Series(['1 days', '-1 days', '0 days'], dtype='timedelta64[ns]')
assert_series_equal(s.mode(), Series([], dtype='timedelta64[ns]'))
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'],
dtype='timedelta64[ns]')
assert_series_equal(s.mode(), Series(['2 min', '1 day'],
dtype='timedelta64[ns]'))
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_var_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
result = self.ts.std(ddof=4)
expected = np.std(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
result = self.ts.var(ddof=4)
expected = np.var(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.var(ddof=1)
self.assertTrue(isnull(result))
result = s.std(ddof=1)
self.assertTrue(isnull(result))
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.ts.sem(ddof=4)
expected = np.std(self.ts.values,
ddof=4) / np.sqrt(len(self.ts.values))
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = self.ts.iloc[[0]]
result = s.sem(ddof=1)
self.assertTrue(isnull(result))
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
# test corner cases, skew() returns NaN unless there's at least 3
# values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.skew()))
self.assertTrue(np.isnan(df.skew()).all())
else:
self.assertEqual(0, s.skew())
self.assertTrue((df.skew() == 0).all())
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])
# test corner cases, kurt() returns NaN unless there's at least 4
# values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.kurt()))
self.assertTrue(np.isnan(df.kurt()).all())
else:
self.assertEqual(0, s.kurt())
self.assertTrue((df.kurt() == 0).all())
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name='int_data')
result = s.describe()
expected = Series([5, 2, s.std(), 0, 1, 2, 3, 4],
name='int_data',
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
self.assert_series_equal(result, expected)
s = Series([True, True, False, False, False], name='bool_data')
result = s.describe()
expected = Series([5, 2, False, 3], name='bool_data',
index=['count', 'unique', 'top', 'freq'])
self.assert_series_equal(result, expected)
s = Series(['a', 'a', 'b', 'c', 'd'], name='str_data')
result = s.describe()
expected = Series([5, 4, 'a', 2], name='str_data',
index=['count', 'unique', 'top', 'freq'])
self.assert_series_equal(result, expected)
def test_argsort(self):
self._check_accum_op('argsort', check_dtype=False)
argsorted = self.ts.argsort()
self.assertTrue(issubclass(argsorted.dtype.type, np.integer))
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
self.assertEqual(s.dtype, 'datetime64[ns]')
shifted = s.shift(-1)
self.assertEqual(shifted.dtype, 'datetime64[ns]')
self.assertTrue(isnull(shifted[4]))
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
self.assert_series_equal(mindexer, Series(mexpected),
check_dtype=False)
self.assert_series_equal(qindexer, Series(qexpected),
check_dtype=False)
self.assertFalse(np.array_equal(qindexer, mindexer))
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def test_cummin(self):
self.assert_numpy_array_equal(self.ts.cummin().values,
np.minimum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.valid())
self.assert_series_equal(result, expected)
def test_cummax(self):
self.assert_numpy_array_equal(self.ts.cummax().values,
np.maximum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.valid())
self.assert_series_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1'
]))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3'
]))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'1 min', ]))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'1 min',
'1 min',
'1 min', ]))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'2 min',
'NaT',
'3 min', ]))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'2 min',
'2 min',
'3 min', ]))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_npdiff(self):
raise nose.SkipTest("skipping due to Series no longer being an "
"ndarray")
# no longer works as the return type of np.diff is now nd.array
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, check_objects=False,
check_allna=False):
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max', 'min']:
ds = Series(date_range('1/1/2001', periods=10))
self.assertRaises(TypeError, f, ds)
# skipna or no
self.assertTrue(notnull(f(self.series)))
self.assertTrue(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(self.series), alternate(nona.values))
allna = self.series * nan
if check_allna:
# xref 9422
# bottleneck >= 1.0 give 0.0 for an allna Series sum
try:
self.assertTrue(nanops._USE_BOTTLENECK)
import bottleneck as bn # noqa
self.assertTrue(bn.__version__ >= LooseVersion('1.0'))
self.assertEqual(f(allna), 0.0)
except:
self.assertTrue(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# 2888
l = [0]
l.extend(lrange(2 ** 40, 2 ** 40 + 1000))
s = Series(l, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
# check on string data
if name not in ['sum', 'min', 'max']:
self.assertRaises(TypeError, f, Series(list('abc')))
# Invalid axis.
self.assertRaises(ValueError, f, self.series, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in compat.signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
self.series, numeric_only=True)
testit()
try:
import bottleneck as bn # noqa
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name, check_dtype=True):
func = getattr(np, name)
self.assert_numpy_array_equal(func(self.ts).values,
func(np.array(self.ts)),
check_dtype=check_dtype)
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_numpy_array_equal(result.values, expected,
check_dtype=False)
def test_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
tm.assert_series_equal(s.compress(cond), expected)
def test_numpy_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
tm.assert_series_equal(np.compress(cond, s), expected)
msg = "the 'axis' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.compress,
cond, s, axis=1)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.compress,
cond, s, out=s)
def test_round(self):
self.ts.index.name = "index_name"
result = self.ts.round(2)
expected = Series(np.round(self.ts.values, 2),
index=self.ts.index, name='ts')
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_numpy_round(self):
# See gh-12600
s = Series([1.53, 1.36, 0.06])
out = np.round(s, decimals=0)
expected = Series([2., 1., 0.])
assert_series_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assertRaisesRegexp(ValueError, msg):
np.round(s, decimals=0, out=s)
def test_built_in_round(self):
if not compat.PY3:
raise nose.SkipTest(
'build in round cannot be overriden prior to Python 3')
s = Series([1.123, 2.123, 3.123], index=lrange(3))
result = round(s)
expected_rounded0 = Series([1., 2., 3.], index=lrange(3))
self.assert_series_equal(result, expected_rounded0)
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=lrange(3))
result = round(s, decimals)
self.assert_series_equal(result, expected_rounded)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
self.assertNotIsInstance(result, Series)
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assertFalse(bool_series.all())
self.assertTrue(bool_series.any())
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
self.assertEqual('abc', s.any()) # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
self.assertTrue(s1.all(skipna=False)) # nan && True => True
self.assertTrue(s1.all(skipna=True))
self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan
self.assertFalse(s2.any(skipna=True))
# Check level.
s = pd.Series([False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2])
assert_series_equal(s.all(level=0), Series([False, True, False]))
assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)
self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)
# bool_only is not implemented alone.
self.assertRaises(NotImplementedError, s.any, bool_only=True)
self.assertRaises(NotImplementedError, s.all, bool_only=True)
def test_modulo(self):
with np.errstate(all='ignore'):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.nan
assert_series_equal(result, expected)
result = p['first'] % 0
expected = Series(np.nan, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
result2 = p['second'] % p['first']
self.assertFalse(np.array_equal(result, result2))
# GH 9144
s = Series([0, 1])
result = s % 0
expected = Series([nan, nan])
assert_series_equal(result, expected)
result = 0 % s
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
def test_ops_consistency_on_empty(self):
# GH 7869
# consistency on empty
# float
result = Series(dtype=float).sum()
self.assertEqual(result, 0)
result = Series(dtype=float).mean()
self.assertTrue(isnull(result))
result = Series(dtype=float).median()
self.assertTrue(isnull(result))
# timedelta64[ns]
result = Series(dtype='m8[ns]').sum()
self.assertEqual(result, Timedelta(0))
result = Series(dtype='m8[ns]').mean()
self.assertTrue(result is pd.NaT)
result = Series(dtype='m8[ns]').median()
self.assertTrue(result is pd.NaT)
def test_corr(self):
tm._skip_if_no_scipy()
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))
# No overlap
self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
tm._skip_if_no_scipy()
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if scipy.__version__ < LooseVersion('0.9'):
raise nose.SkipTest("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series(
[-0.89926396, 0.94209606, -1.03289164, -0.95445587, 0.76910310, -
0.06430576, -2.09704447, 0.40660407, -0.89926396, 0.94209606])
B = Series(
[-1.01270225, -0.62210117, -1.56895827, 0.59592943, -0.01680292,
1.17258718, -1.06009347, -0.10222060, -0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)
# partial overlap
self.assertAlmostEqual(self.ts[:15].cov(self.ts[5:]),
self.ts[5:15].std() ** 2)
# No overlap
self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.cov(cp)))
# min_periods
self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, nan])
assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
        result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
self.assertTrue(np.all(result == expected.values))
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
self.assertRaises(Exception, a.dot, a.values[:3])
self.assertRaises(ValueError, a.dot, b.T)
def test_value_counts_nunique(self):
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
self.assertEqual(result, 11)
def test_unique(self):
# 714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
s = Series([1.2345] * 100, dtype='f4')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# NAs in object arrays #714
s = Series(['foo'] * 100, dtype='O')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_drop_duplicates(self):
# check both int and object
for s in [Series([1, 2, 3, 3]), Series(['1', '2', '3', '3'])]:
expected = Series([False, False, False, True])
assert_series_equal(s.duplicated(), expected)
assert_series_equal(s.drop_duplicates(), s[~expected])
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, False, True, False])
assert_series_equal(s.duplicated(keep='last'), expected)
assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.duplicated(take_last=True), expected)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(
s.drop_duplicates(take_last=True), s[~expected])
sc = s.copy()
with tm.assert_produces_warning(FutureWarning):
sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, False, True, True])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
for s in [Series([1, 2, 3, 5, 3, 2, 4]),
Series(['1', '2', '3', '5', '3', '2', '4'])]:
expected = Series([False, False, False, False, True, True, False])
assert_series_equal(s.duplicated(), expected)
assert_series_equal(s.drop_duplicates(), s[~expected])
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, True, True, False, False, False, False])
assert_series_equal(s.duplicated(keep='last'), expected)
assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep='last', inplace=True)
assert_series_equal(sc, s[~expected])
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.duplicated(take_last=True), expected)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(
s.drop_duplicates(take_last=True), s[~expected])
sc = s.copy()
with tm.assert_produces_warning(FutureWarning):
sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, s[~expected])
expected = Series([False, True, True, False, True, True, False])
assert_series_equal(s.duplicated(keep=False), expected)
assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
sc = s.copy()
sc.drop_duplicates(keep=False, inplace=True)
assert_series_equal(sc, s[~expected])
def test_rank(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
# rankdata returns a ndarray
exp = Series(rankdata(filled), index=filled.index, name='ts')
exp[mask] = np.nan
tm.assert_series_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
rng = date_range('1/1/1990', periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.ix[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
exp = Series([2, 1, 3, 5, 4, 6.0])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
# GH 5968
iseries = Series(['3 day', '1 day 10m', '-2 day', pd.NaT],
dtype='m8[ns]')
exp = Series([3, 2, 1, np.nan])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
values = np.array(
[-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40
], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_signature(self):
s = Series([0, 1])
s.rank(method='average')
self.assertRaises(ValueError, s.rank, 'average')
def test_rank_inf(self):
raise nose.SkipTest('DataFrame.rank does not currently rank '
'np.inf and -np.inf properly')
values = np.array(
[-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10,
2, 40, np.inf], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
tm.assertIsInstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]), Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime(
[np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
l = s.clip_lower(thresh)
u = s.clip_upper(thresh)
self.assertEqual(l[notnull(l)].min(), thresh)
self.assertEqual(u[notnull(u)].max(), thresh)
self.assertEqual(list(isnull(s)), list(isnull(l)))
self.assertEqual(list(isnull(s)), list(isnull(u)))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
def test_clip_with_datetimes(self):
# GH 11838
# naive and tz-aware datetimes
t = Timestamp('2015-12-01 09:30:30')
s = Series([Timestamp('2015-12-01 09:30:00'), Timestamp(
'2015-12-01 09:31:00')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00'), Timestamp(
'2015-12-01 09:30:30')])
assert_series_equal(result, expected)
t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
s = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:31:00', tz='US/Eastern')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:30:30', tz='US/Eastern')])
assert_series_equal(result, expected)
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum,
'cumprod': np.cumprod,
'cummin': cummin,
'cummax': cummax}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse,
'cumprod': cpe,
'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with tm.assertRaises(TypeError):
s.isin('a')
with tm.assertRaises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range('jan-01-2013', 'jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5), unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
def test_timedelta64_analytics(self):
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
self.assertEqual(result, 0)
result = td.idxmax()
self.assertEqual(result, 2)
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
self.assertEqual(result, 1)
result = td.idxmax()
self.assertEqual(result, 2)
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
# result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
self.assertEqual(result, expected)
result = td.min()
expected = Timedelta('1 days')
self.assertEqual(result, expected)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assertTrue(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmin()))
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
self.assertEqual(result, 0)
s[0] = np.nan
result = s.idxmin()
self.assertEqual(result, 1)
def test_numpy_argmin(self):
# argmin is aliased to idxmin
data = np.random.randint(0, 11, size=10)
result = np.argmin(Series(data))
self.assertEqual(result, np.argmin(data))
if not _np_version_under1p10:
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.argmin,
Series(data), out=data)
def test_idxmax(self):
# test idxmax
# _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assertTrue(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmax()))
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
self.assertEqual(result, 5)
s[5] = np.nan
result = s.idxmax()
self.assertEqual(result, 4)
# Float64Index
# GH 5914
s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
s = pd.Series(s.index, s.index)
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
def test_numpy_argmax(self):
# argmax is aliased to idxmax
data = np.random.randint(0, 11, size=10)
result = np.argmax(Series(data))
self.assertEqual(result, np.argmax(data))
if not _np_version_under1p10:
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.argmax,
Series(data), out=data)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
self.assertEqual(np.ptp(ser), np.ptp(arr))
# GH11163
s = Series([3, 5, np.nan, -3, 10])
self.assertEqual(s.ptp(), 13)
self.assertTrue(pd.isnull(s.ptp(skipna=False)))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
self.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
self.assert_series_equal(s.ptp(level=0, skipna=False), expected)
with self.assertRaises(ValueError):
s.ptp(axis=1)
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
with self.assertRaises(TypeError):
s.ptp()
with self.assertRaises(NotImplementedError):
s.ptp(numeric_only=True)
def test_empty_timeseries_reductions_return_nat(self):
# covers #11245
for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
self.assertIs(Series([], dtype=dtype).min(), pd.NaT)
self.assertIs(Series([], dtype=dtype).max(), pd.NaT)
def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name='x')
expected = Series(s.values.repeat(2), name='x',
index=s.index.values.repeat(2))
assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.repeat, s, 2, axis=0)
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
e = 2
self.assertEqual(r, e)
r = s.searchsorted([30])
e = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
e = 1
self.assertEqual(r, e)
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
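# Descriptive note: the `sorter` argument supplies the permutation that
# would sort `s`, so searchsorted can be applied to an unsorted Series.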
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_is_unique(self):
# GH11946
s = Series(np.random.randint(0, 10, size=1000))
self.assertFalse(s.is_unique)
s = Series(np.arange(1000))
self.assertTrue(s.is_unique)
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
self.assertFalse(s.is_monotonic)
s = Series(np.arange(1000))
self.assertTrue(s.is_monotonic)
self.assertTrue(s.is_monotonic_increasing)
s = Series(np.arange(1000, 0, -1))
self.assertTrue(s.is_monotonic_decreasing)
s = Series(pd.date_range('20130101', periods=10))
self.assertTrue(s.is_monotonic)
self.assertTrue(s.is_monotonic_increasing)
s = Series(list(reversed(s.tolist())))
self.assertFalse(s.is_monotonic)
self.assertTrue(s.is_monotonic_decreasing)
def test_nsmallest_nlargest(self):
# float, int, datetime64 (use i8), timedelta64 (same),
# object that are numbers, object that are strings
base = [3, 2, 1, 2, 5]
s_list = [
Series(base, dtype='int8'),
Series(base, dtype='int16'),
Series(base, dtype='int32'),
Series(base, dtype='int64'),
Series(base, dtype='float32'),
Series(base, dtype='float64'),
Series(base, dtype='uint8'),
Series(base, dtype='uint16'),
Series(base, dtype='uint32'),
Series(base, dtype='uint64'),
Series(base).astype('timedelta64[ns]'),
Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),
]
raising = [
Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
]
for r in raising:
dt = r.dtype
msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with tm.assertRaisesRegexp(TypeError, msg):
method(arg)
for s in s_list:
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(
s.nsmallest(2, take_last=True), s.iloc[[2, 3]])
assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])
assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]])
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(
s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.sort_values())
assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with tm.assertRaisesRegexp(ValueError, msg):
s.nsmallest(keep='invalid')
with tm.assertRaisesRegexp(ValueError, msg):
s.nlargest(keep='invalid')
# GH 13412
s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
result = s.nlargest(3)
expected = s.sort_values(ascending=False).head(3)
assert_series_equal(result, expected)
result = s.nsmallest(3)
expected = s.sort_values().head(3)
assert_series_equal(result, expected)
def test_sortlevel(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sortlevel('A')
assert_series_equal(backwards, res)
res = s.sortlevel(['A', 'B'])
assert_series_equal(backwards, res)
res = s.sortlevel('A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sortlevel(['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_apply_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories is the same
values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = s.apply(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
self.assertEqual(result.dtype, np.object)
def test_shift_int(self):
ts = self.ts.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_shift_categorical(self):
# GH 9416
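# Note: for categorical data, shifting fills the vacated positions with
# code -1, which is how pandas Categorical encodes NaN; the code
# comparisons below verify exactly that.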
s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid())
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
self.assertTrue(np.all(sp1.values.codes[:1] == -1))
self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:]))
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
self.assertTrue(np.all(sn2.values.codes[-2:] == -1))
self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2]))
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
def test_reshape_deprecate(self):
x = Series(np.random.random(10), name='x')
tm.assert_produces_warning(FutureWarning, x.reshape, x.shape)
def test_reshape_non_2d(self):
# see gh-4554
with tm.assert_produces_warning(FutureWarning):
x = Series(np.random.random(201), name='x')
self.assertTrue(x.reshape(x.shape, ) is x)
# see gh-2719
with tm.assert_produces_warning(FutureWarning):
a = Series([1, 2, 3, 4])
result = a.reshape(2, 2)
expected = a.values.reshape(2, 2)
tm.assert_numpy_array_equal(result, expected)
self.assertIsInstance(result, type(expected))
def test_reshape_2d_return_array(self):
x = Series(np.random.random(201), name='x')
with tm.assert_produces_warning(FutureWarning):
result = x.reshape((-1, 1))
self.assertNotIsInstance(result, Series)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = np.reshape(x, (-1, 1))
self.assertNotIsInstance(result2, Series)
with tm.assert_produces_warning(FutureWarning):
result = x[:, None]
expected = x.reshape((-1, 1))
assert_almost_equal(result, expected)
def test_reshape_bad_kwarg(self):
a = Series([1, 2, 3, 4])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "'foo' is an invalid keyword argument for this function"
tm.assertRaisesRegexp(TypeError, msg, a.reshape, (2, 2), foo=2)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = r"reshape\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, a.reshape, a.shape, foo=2)
def test_numpy_reshape(self):
a = Series([1, 2, 3, 4])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.reshape(a, (2, 2))
expected = a.values.reshape(2, 2)
tm.assert_numpy_array_equal(result, expected)
self.assertIsInstance(result, type(expected))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.reshape(a, a.shape)
tm.assert_series_equal(result, a)
def test_unstack(self):
from numpy import nan
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)
unstacked = s.unstack(0)
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1, 2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'
], ['a', 'a', 'b', 'a', 'b'],
[1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_value_counts_datetime(self):
# most dtypes are tested in test_base.py
values = [pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 11:00')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'])
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_datetime_tz(self):
values = [pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'], tz='US/Eastern')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_period(self):
values = [pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-03', freq='M')]
exp_idx = pd.PeriodIndex(['2011-01', '2011-03', '2011-02'], freq='M')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.PeriodIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_ordered(self):
# most dtypes are tested in test_base.py
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=True)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_not_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=False)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
| apache-2.0 |
anurag313/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
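# A possible quantitative follow-up (not part of the original example, kept
# commented out): compare the training error of the single tree against the
# boosted ensemble.
# from sklearn.metrics import mean_squared_error
# print("single tree MSE:", mean_squared_error(y, y_1))
# print("boosted MSE:", mean_squared_error(y, y_2))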
| bsd-3-clause |
jniediek/combinato | combinato/plot/plot_unit_quality.py | 1 | 9362 | # JN 2015-07-05
# -*- encoding: utf-8 -*-
"""
create unit overview plots for all units
"""
from __future__ import division, print_function, absolute_import
import os
import numpy as np
import matplotlib.pyplot as mpl
from matplotlib.gridspec import GridSpec
from matplotlib import cm
from .spike_heatmap import spike_heatmap
from .. import h5files, Combinato, TYPE_NAMES
LOCAL_TYPE_NAMES = {0: 'NA', 1: 'MU', 2: 'SU', -1: 'Arti'}
FIGSIZE = (13, 7.5)
GRID = GridSpec(2, 6, left=.08, right=.95,
top=.9, bottom=.05, wspace=.7, hspace=.25)
DENSITY_BINS = np.linspace(-150, 150, 150)
DPI = 100
FIG = mpl.figure(figsize=FIGSIZE, dpi=DPI)
def create_panels(fig):
panels = dict()
panels['maxima'] = fig.add_subplot(GRID[0, :4])
panels['isi'] = fig.add_subplot(GRID[1, 4:])
panels['cumulative'] = fig.add_subplot(GRID[1, :4])
panels['density'] = fig.add_subplot(GRID[0, 4], xticks=[])
panels['density2'] = fig.add_subplot(GRID[0, 5])
panels['density2'].axis('off')
panels['images'] = []
# for i in range(12):
# row = int(i/6) + 2
# col = i % 6
# plot = fig.add_subplot(GRID[row, col])
# plot.axis('off')
# panels['images'].append(plot)
#
return panels
PANELS = create_panels(FIG)
def add_events(plot, events):
"""
"""
for event in events:
if isinstance(event[1], int) or event[1] in ('OFF', '1.5', 'ON'):
va = 'top'
color = 'k'
ypos = 0
xpos = event[0]
backgroundcolor = 'none'
else:
va = 'bottom'
color = 'g'
ypos = 0
xpos = event[0] + 20 # datetime.timedelta(seconds=20)
backgroundcolor = 'w'
plot.axvline(event[0]/60, color=color)
plot.text(xpos/60, ypos, event[1], va=va, color=color,
backgroundcolor=backgroundcolor)
def make_colors(ncolors):
"""
create a list of N matplotlib colors
"""
return cm.spectral(np.linspace(0, 1, ncolors))
def plot_maxima_over_time(plot, group, start_stop, sign, thresholds=None):
"""
plots maxima over time, with different colors for different clusters
"""
plot.cla()
COLOR_CUTOFF = 10
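# Clusters with more than COLOR_CUTOFF spikes get their own color from the
# spectral colormap; smaller clusters are all drawn in grey (`out_color`).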
with_colors = list()
same_color = list()
end_min = (start_stop[1] - start_stop[0])/1000/60
for clid in group:
if group[clid]['times'].shape[0] > COLOR_CUTOFF:
with_colors.append(clid)
else:
same_color.append(clid)
colors = make_colors(len(with_colors))
out_color = 'grey'
color_count = 0
for clid, cluster in group.items():
spikes = cluster['spikes']
times = cluster['times'] - start_stop[0]
if sign == 'neg':
data = spikes.min(1)
elif sign == 'pos':
data = spikes.max(1)
color = out_color
if clid in with_colors:
color = colors[color_count]
color_count += 1
plot.plot(times/1000/60, data, '.', ms=2, color=color)
# now plot the thresholds
thr_times = thresholds[:, :2].ravel()
thrs = np.vstack((thresholds[:, 2], thresholds[:, 2])).T.ravel()
thr_times -= start_stop[0]
thr_times /= 60*1000
plot.plot(thr_times, thrs)
tickpos = np.arange(0, end_min, 60)
ticklabels = [format(x, '.0f') for x in tickpos]
ticklabels[-1] += ' min'
plot.set_xticks(tickpos)
plot.set_xticklabels(ticklabels)
plot.set_xlim((0, end_min))
# plot.xaxis.set_tick_params(labeltop='on', labelbottom='off')
# plot.set_ylim(ylim)
plot.set_ylabel(u'µV')
plot.grid(True)
def plot_group(gid, group, group_joined, start_stop, sign,
savefolder, thresholds):
"""
just a simple group overview
"""
print('Plotting group {} ({} clusters)'.format(gid, len(group)))
panels = PANELS
# timelim = (start_time/60, stop_time/60)
# if sign == 'neg':
# ylim = (-200, 0)
# elif sign == 'pos':
# ylim = (0, 200)
#
# maxima over time
plot = panels['maxima']
plot_maxima_over_time(plot, group, start_stop, sign, thresholds)
plot.text(.5, 1.05,
'Group {} ({}) Firing over time'.format(gid,
TYPE_NAMES[group_joined['type']]),
va='bottom', ha='center', transform=plot.transAxes,
backgroundcolor='w')
#plot.text(0, 1, '{} ({})'.format(gid, group['type']),
# transform=plot.transAxes, va='bottom', ha='left')
# ISI
times = group_joined['times']
spikes = group_joined['spikes']
timelim = (start_stop[0]/1000/60, start_stop[1]/1000/60)
plot = panels['isi']
data = np.diff(times) # to ms
data = data[data <= 100]
plot.cla()
if data.shape[0] > 10:
plot.hist(data, 100, edgecolor='none')
plot.set_xlim([0, 100])
under3 = (data <= 3).sum()/data.shape[0]
plot.text(.5, 1.1, '{:.1%} < 3 ms'.format(under3),
va='top', ha='center', transform=plot.transAxes,
backgroundcolor='w')
else:
plot.axis('off')
plot.set_ylabel('# lags')
plot.set_xlabel('ms')
plot.text(.95, .97, 'Inter-Spike Intervals',
va='top', ha='right', transform=plot.transAxes,
backgroundcolor='w')
# all means?
# count over time
plot = panels['cumulative']
plot.cla()
plot.plot(times/1000/60, range(len(times)))
plot.set_xticklabels([])
plot.set_xlim(timelim)
plot.set_ylabel('# spikes')
plot.grid(True)
#plot.set_xticks(tickpos)
# add_events(plot, events)
# plot.text(.5, -.15, u'Propofol concentration [µg/mL]', va='top', ha='center',
# transform=plot.transAxes, backgroundcolor='w')
plot.text(.5, .95, 'Cumulative spike count',
va='top', ha='center', transform=plot.transAxes,
backgroundcolor='w')
# density
plot = panels['density']
plot.cla()
spike_heatmap(plot, spikes)
plot.set_xticks([])
plot.set_ylabel(u'µV')
# other density
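# Amplitude histogram per waveform sample point, shown as an image next to
# the spike_heatmap panel.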
data = np.array([np.histogram(row, bins=DENSITY_BINS)[0]
for row in spikes.T])
plot = panels['density2']
plot.cla()
plot.axis('off')
plot.imshow(data.T, aspect='auto', origin='lower', cmap=cm.hot)
# now the images
# for i in range(12):
# panels['images'][i].cla()
# panels['images'][i].axis('off')
#
# for cimg in range(min(len(images), 12)):
# img = mpl.imread(images[cimg])
# plot = panels['images'][cimg]
# plot.imshow(img)
#
# def add_label(plot, label):
# plot.text(-.2, .5, label, transform=plot.transAxes, rotation=90,
# ha='left', va='center',)
#
# label = 'Subunits 1 to {}'.format(min(len(images), 6))
# add_label(panels['images'][0], label)
#
# if len(images) > 6:
# label = 'Subunits 7 to {}'.format(min(len(images), 12))
# add_label(panels['images'][6], label)
#
#
#FIG.suptitle(title)
def run_file(fname, sign, label, savefolder):
"""
run overview plot on one spikes file
"""
print('Initializing {} {} {}'.format(fname, sign, label))
# get thresholds
manager = Combinato(fname, sign, label)
if not manager.initialized:
print('Could not initialize {} {}'.format(fname, label))
return
thresholds = manager.get_thresholds()
start = manager.times[sign][0]
stop = manager.times[sign][-1]
nspk = manager.times[sign].shape[0]
duration_min = (stop - start)/1000/60
start_stop = (start, stop)
entname = manager.header['AcqEntName']
if duration_min > 120:
dur_str = '{:.1f} h'.format(duration_min/60)
else:
dur_str = '{:.0f} min'.format(duration_min)
entity = manager.header['AcqEntName']
print('Sorting contains {} spikes from {}, duration {}'.
format(nspk, entity, dur_str))
if not manager.initialized:
print('could not initialize ' + fname)
return
groups_joined = manager.get_groups_joined()
groups = manager.get_groups()
bname = os.path.splitext(os.path.basename(fname))[0]
for gid in groups_joined:
gtype = manager.get_group_type(gid)
groups_joined[gid]['type'] = gtype
plot_group(gid,
groups[gid],
groups_joined[gid],
start_stop,
sign,
savefolder,
thresholds)
gtypename = LOCAL_TYPE_NAMES[gtype]
outfname = '{}_{}_{}_{:03d}_{}.png'.\
format(bname, entname, label, gid, gtypename)
FIG.savefig(outfname)
def parse_args():
"""
standard arg parser
"""
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--files', nargs='+')
parser.add_argument('--label', required=True)
parser.add_argument('--neg', action='store_true', default=False)
args = parser.parse_args()
if os.path.isdir('overview'):
savefolder = 'overview'
else:
savefolder = os.getcwd()
if args.files:
fnames = args.files
else:
fnames = h5files(os.getcwd())
sign = 'neg' if args.neg else 'pos'
label = args.label
for fname in fnames:
print(fname)
run_file(fname, sign, label, savefolder)
| mit |
bikong2/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/pylab_examples/annotation_demo3.py | 6 | 3296 | import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2)
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
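# The annotations below illustrate different coordinate systems for `xy` and
# `xytext`: 'figure fraction', 'axes fraction', 'data', offsets in points,
# coordinates relative to another artist, and OffsetFrom; the two "Drag me"
# annotations on ax1 are also made draggable at the end of the script.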
ax1.annotate('figure fraction : 0, 0', xy=(0, 0), xycoords='figure fraction',
xytext=(20, 20), textcoords='offset points',
ha="left", va="bottom",
bbox=bbox_args,
arrowprops=arrow_args
)
ax1.annotate('figure fraction : 1, 1', xy=(1, 1), xycoords='figure fraction',
xytext=(-20, -20), textcoords='offset points',
ha="right", va="top",
bbox=bbox_args,
arrowprops=arrow_args
)
ax1.annotate('axes fraction : 0, 0', xy=(0, 0), xycoords='axes fraction',
xytext=(20, 20), textcoords='offset points',
ha="left", va="bottom",
bbox=bbox_args,
arrowprops=arrow_args
)
ax1.annotate('axes fraction : 1, 1', xy=(1, 1), xycoords='axes fraction',
xytext=(-20, -20), textcoords='offset points',
ha="right", va="top",
bbox=bbox_args,
arrowprops=arrow_args
)
an1 = ax1.annotate('Drag me 1', xy=(.5, .7), xycoords='data',
#xytext=(.5, .7), textcoords='data',
ha="center", va="center",
bbox=bbox_args,
#arrowprops=arrow_args
)
an2 = ax1.annotate('Drag me 2', xy=(.5, .5), xycoords=an1,
xytext=(.5, .3), textcoords='axes fraction',
ha="center", va="center",
bbox=bbox_args,
arrowprops=dict(patchB=an1.get_bbox_patch(),
connectionstyle="arc3,rad=0.2",
**arrow_args)
)
an3 = ax1.annotate('', xy=(.5, .5), xycoords=an2,
xytext=(.5, .5), textcoords=an1,
ha="center", va="center",
bbox=bbox_args,
arrowprops=dict(patchA=an1.get_bbox_patch(),
patchB=an2.get_bbox_patch(),
connectionstyle="arc3,rad=0.2",
**arrow_args)
)
t = ax2.annotate('xy=(0, 1)\nxycoords=("data", "axes fraction")',
xy=(0, 1), xycoords=("data", 'axes fraction'),
xytext=(0, -20), textcoords='offset points',
ha="center", va="top",
bbox=bbox_args,
arrowprops=arrow_args
)
from matplotlib.text import OffsetFrom
ax2.annotate('xy=(0.5, 0)\nxycoords=artist',
xy=(0.5, 0.), xycoords=t,
xytext=(0, -20), textcoords='offset points',
ha="center", va="top",
bbox=bbox_args,
arrowprops=arrow_args
)
ax2.annotate('xy=(0.8, 0.5)\nxycoords=ax1.transData',
xy=(0.8, 0.5), xycoords=ax1.transData,
xytext=(10, 10), textcoords=OffsetFrom(ax2.bbox, (0, 0), "points"),
ha="left", va="bottom",
bbox=bbox_args,
arrowprops=arrow_args
)
ax2.set_xlim(-2, 2)
ax2.set_ylim(-2, 2)
an1.draggable()
an2.draggable()
plt.show()
| apache-2.0 |
devanshdalal/scikit-learn | sklearn/linear_model/ridge.py | 13 | 51397 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
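# Solves the ridge normal equations per target with scipy's conjugate
# gradient: (X^T X + alpha*Id) w = X^T y, or the dual system
# (X X^T + alpha*Id) v = y with w = X^T v when n_features > n_samples.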
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
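# Each target is solved with scipy.sparse.linalg.lsqr as a damped
# least-squares problem, i.e. minimizing ||X w - y||^2 + alpha * ||w||^2.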
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
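# The returned dual coefficients relate to the primal weights via
# w = X^T dual_coef; sample weights are folded in by rescaling y and K.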
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
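# Closed-form ridge solution from the (thin) SVD X = U S V^T:
# w = V diag(s / (s^2 + alpha)) U^T y, with singular values below ~1e-15
# treated as zero (same cutoff as scipy.linalg.pinv).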
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
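# Illustrative sketch (not part of the original module): the "balanced"
# class_weight documented above boils down to the weights computed below;
# assumes integer class labels 0..n_classes-1.
def _balanced_class_weight_sketch():
    import numpy as np

    y = np.array([0, 0, 0, 1, 1, 2])  # three classes, imbalanced
    n_samples, n_classes = y.shape[0], np.unique(y).shape[0]
    weights = n_samples / (n_classes * np.bincount(y).astype(float))
    # weights[k] multiplies every sample of class k:
    # class 0 -> 6/(3*3) ~= 0.67, class 1 -> 1.0, class 2 -> 2.0
    return weights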
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y, centered_kernel=True):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
# the following emulates an additional constant regressor
# corresponding to fit_intercept=True
# but this is done only when the features have been centered
if centered_kernel:
K += np.ones_like(K)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We don't construct matrix G, instead compute action on y & diagonal.
"""
w = 1. / (v + alpha)
constant_column = np.var(Q, 0) < 1.e-12
# detect constant columns
w[constant_column] = 0 # cancel the regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y, centered_kernel=True):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
if centered_kernel:
X = np.hstack((X, np.ones((X.shape[0], 1))))
# to emulate the fit_intercept=True situation, add a column of ones
# Note that by centering, the other columns are orthogonal to that one
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
# detect columns colinear to ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
centered_kernel = not sparse.issparse(X) and self.fit_intercept
v, Q, QT_y = _pre_compute(X, y, centered_kernel)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(alpha, y, v, Q, QT_y)
else:
out, c = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
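# Illustrative sketch (not part of the original module): a brute-force check
# of the leave-one-out identity quoted in the notes above, looe = c / diag(G),
# for plain ridge without an intercept (numpy only).
def _gcv_loo_identity_sketch(alpha=1.0):
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(12, 4)
    y = rng.randn(12)
    n = X.shape[0]

    # Closed form: G = (K + alpha*I)^-1, c = G y, looe = c / diag(G)
    K = X.dot(X.T)
    G = np.linalg.inv(K + alpha * np.eye(n))
    c = G.dot(y)
    looe_closed_form = c / np.diag(G)

    # Brute force: refit on all-but-one sample and measure that sample's error
    looe_brute = np.empty(n)
    for i in range(n):
        mask = np.arange(n) != i
        Xi, yi = X[mask], y[mask]
        w = np.linalg.solve(Xi.T.dot(Xi) + alpha * np.eye(X.shape[1]),
                            Xi.T.dot(yi))
        looe_brute[i] = y[i] - X[i].dot(w)

    assert np.allclose(looe_closed_form, looe_brute)
    return looe_closed_form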
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, cv=self.cv, scoring=self.scoring)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'eigen' if X is a sparse matrix, if sample weights are
given, or if n_features > n_samples; otherwise use 'svd'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
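# Illustrative sketch (not part of the original module): with cv=None the
# efficient generalized cross-validation path is used, and
# store_cv_values=True exposes the per-sample, per-alpha values documented
# above (numpy plus the public RidgeCV API only; the helper name is
# illustrative).
def _ridgecv_store_cv_values_sketch():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = X.dot(rng.randn(5)) + 0.1 * rng.randn(50)

    reg = RidgeCV(alphas=(0.1, 1.0, 10.0), store_cv_values=True).fit(X, y)
    assert reg.cv_values_.shape == (50, 3)  # (n_samples, n_alphas) for 1-d y
    return reg.alpha_  # the selected regularization strength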
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
n-west/gnuradio-volk | gr-utils/python/utils/plot_fft_base.py | 53 | 10449 | #!/usr/bin/env python
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_fft_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = getattr(scipy, datatype)
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.94, ("File: %s" % filename), weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % (self.position))
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.iq_fft = self.dofft(self.iq)
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.freq = self.calc_freq(self.time, self.sample_rate)
def dofft(self, iq):
N = len(iq)
iq_fft = scipy.fftpack.fftshift(scipy.fft(iq)) # fft and shift axis
iq_fft = 20*scipy.log10(abs((iq_fft+1e-15)/N)) # convert to decibels, adjust power
# adding 1e-15 (-300 dB) to protect against value errors if an item in iq_fft is 0
return iq_fft
def calc_freq(self, time, sample_rate):
N = len(time)
Fs = 1.0 / (time.max() - time.min())
Fn = 0.5 * sample_rate
freq = scipy.array([-Fn + i*Fs for i in xrange(N)])
return freq
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=[0.075, 0.2, 0.4, 0.6])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for FFT plot
self.sp_fft = self.fig.add_subplot(2,2,2, position=[0.575, 0.2, 0.4, 0.6])
self.sp_fft.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
self.sp_fft.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_fft.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time() # draw the plot
self.plot_fft = self.sp_fft.plot([], 'bo-') # make plot for FFT
self.draw_fft() # draw the plot
draw()
def draw_time(self):
reals = self.iq.real
imags = self.iq.imag
self.plot_iq[0].set_data([self.time, reals])
self.plot_iq[1].set_data([self.time, imags])
self.sp_iq.set_xlim(self.time.min(), self.time.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_fft(self):
self.plot_fft[0].set_data([self.freq, self.iq_fft])
self.sp_fft.set_xlim(self.freq.min(), self.freq.max())
self.sp_fft.set_ylim([self.iq_fft.min()-10, self.iq_fft.max()+10])
def update_plots(self):
self.draw_time()
self.draw_fft()
self.xlim = self.sp_iq.get_xlim()
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
self.xlim = newxlim
#xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0]))))
xmax = min(int(ceil(self.sample_rate*(self.xlim[1]))), len(self.iq))
iq = self.iq[xmin : xmax]
time = self.time[xmin : xmax]
iq_fft = self.dofft(iq)
freq = self.calc_freq(time, self.sample_rate)
self.plot_fft[0].set_data(freq, iq_fft)
self.sp_fft.axis([freq.min(), freq.max(),
iq_fft.min()-10, iq_fft.max()+10])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time as well as the frequency domain (FFT) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. This value defaults to 1000. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
return parser
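# Illustrative sketch (not part of the original script): the same
# shift-and-convert-to-dB step that dofft() performs, written with numpy only
# so it can be checked on a synthetic complex tone (the helper name and
# defaults below are only for illustration).
def _fft_db_sketch(sample_rate=1000.0, tone_hz=100.0, n=1000):
    import numpy as np

    t = np.arange(n) / sample_rate
    iq = np.exp(2j * np.pi * tone_hz * t)                # unit-amplitude complex tone
    spec = np.fft.fftshift(np.fft.fft(iq))               # fft and shift axis
    spec_db = 20 * np.log10(np.abs((spec + 1e-15) / n))  # 1/N power adjustment, then dB
    freq = np.fft.fftshift(np.fft.fftfreq(n, d=1.0 / sample_rate))
    # With these defaults the tone sits exactly on the +100 Hz bin at ~0 dB
    return freq[np.argmax(spec_db)], spec_db.max()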
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_fft_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_fft_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
knabar/openmicroscopy | components/tools/OmeroPy/test/unit/test_jvmcfg.py | 13 | 7583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of the automatic JVM setting logic for OMERO startup.
"""
import pytest
from omero.config import ConfigXml, xml
from omero.install.jvmcfg import adjust_settings
from omero.install.jvmcfg import ManualStrategy
from omero.install.jvmcfg import PercentStrategy
from omero.install.jvmcfg import Settings
from omero.install.jvmcfg import Strategy
from omero.install.jvmcfg import strip_dict
from omero.install.jvmcfg import usage_charts
from omero.util.temp_files import create_path
from path import path
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import tostring
from xml.etree.ElementTree import XML
from test.unit.test_config import initial
def write_config(data):
p = create_path()
i = initial()
for k, v in data.items():
for x in i[0:2]: # __ACTIVE__ & default
SubElement(x, "property", name=k, value=v)
string = tostring(i, 'utf-8')
txt = xml.dom.minidom.parseString(string).toprettyxml(" ", "\n", None)
p.write_text(txt)
return p
class TestMemoryStrip(object):
def test_1(self):
rv = strip_dict({"a.b": "c"}, prefix="a")
assert {"b": "c"} == rv
def test_2(self):
rv = strip_dict({"a.b.c": "d"}, prefix="a.b")
assert rv["c"] == "d"
def test_3(self):
rv = strip_dict({
"omero.jvmcfg.foo": "a",
"something.else": "b"})
assert rv["foo"] == "a"
assert "something.else" not in rv
@pytest.mark.parametrize("input,output", (
({"omero.jvmcfg.heap_size.blitz": "1g"}, {"heap_size": "1g"}),
))
def test_4(self, input, output):
p = write_config(input)
config = ConfigXml(filename=str(p), env_config="default")
try:
m = config.as_map()
s = strip_dict(m, suffix="blitz")
assert s == output
finally:
config.close()
def test_5(self):
rv = strip_dict({
"omero.jvmcfg.a.blitz": "b",
}, suffix="blitz")
assert rv["a"] == "b"
class TestSettings(object):
def test_initial(self):
s = Settings()
assert s.perm_gen == "128m"
assert s.heap_dump == "off"
assert s.heap_size == "512m"
def test_explicit(self):
s = Settings({
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_defaults(self):
s = Settings({}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_both(self):
s = Settings({
"perm_gen": "aaa",
"heap_dump": "bbb",
"heap_size": "ccc",
}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "aaa"
assert s.heap_dump == "bbb"
assert s.heap_size == "ccc"
class TestStrategy(object):
def test_no_instantiate(self):
with pytest.raises(Exception):
Strategy("blitz")
def test_hard_coded(self):
strategy = ManualStrategy("blitz")
settings = strategy.get_memory_settings()
assert settings == [
"-Xmx512m",
"-XX:MaxPermSize=128m",
"-XX:+IgnoreUnrecognizedVMOptions",
]
def test_percent_usage(self):
strategy = PercentStrategy("blitz")
table = list(strategy.usage_table(15, 16))[0]
assert table[0] == 2**15
assert table[1] == 2**15*15/100
def test_heap_dump_on(self):
settings = Settings({"heap_dump": "on"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert not append
def test_heap_dump_tmp(self):
settings = Settings({"heap_dump": "tmp"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert "HeapDumpPath" in "".join(append)
class AdjustFixture(object):
def __init__(self, input, output, name, **kwargs):
self.input = input
self.output = output
self.name = name
self.kwargs = kwargs
def validate(self, rv):
for k, v in self.output.items():
assert k in rv
found = rv[k]
found.pop(0) # settings
assert v == found, "%s.%s: %s <> %s" % (self.name, k,
v, found)
import json
f = open(__file__[:-3] + ".json", "r")
data = json.load(f)
AFS = []
for x in data:
AFS.append(AdjustFixture(x["input"], x["output"], x["name"]))
def template_xml():
templates = path(__file__) / ".." / ".." / ".."
templates = templates / ".." / ".." / ".."
templates = templates / "etc" / "templates" / "grid" / "templates.xml"
templates = templates.abspath()
return XML(templates.text())
class TestAdjustStrategy(object):
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_adjust(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
xml = template_xml()
config = ConfigXml(filename=str(p), env_config="default")
try:
rv = adjust_settings(config, xml, **fixture.kwargs)
fixture.validate(rv)
finally:
config.close()
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_12527(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
old_templates = path(__file__).dirname() / "old_templates.xml"
xml = XML(old_templates.abspath().text())
config = ConfigXml(filename=str(p), env_config="default")
with pytest.raises(Exception):
adjust_settings(config, xml, **fixture.kwargs)
class TestChart(object):
def test_percent_chart(self):
try:
usage_charts("target/charts.png")
except ImportError:
# Requires matplotlib, etc
pass
| gpl-2.0 |
liangz0707/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
pred_type : 'bulk' or 'atomic'
configuration : benchmark configuration dict (provides the estimator labels)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : number of training instances (int)
n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/tests/test_base.py | 2 | 25265 | # -*- coding: utf-8 -*-
import os
import pytest
from operator import add, mul
import subprocess
import sys
import warnings
from toolz import merge
import dask
from dask import delayed
from dask.base import (compute, tokenize, normalize_token, normalize_function,
visualize, persist, function_cache, is_dask_collection,
DaskMethodsMixin, optimize, unpack_collections,
named_schedulers, get_scheduler)
from dask.delayed import Delayed
from dask.utils import tmpdir, tmpfile, ignoring
from dask.utils_test import inc, dec
from dask.compatibility import long, unicode
def import_or_none(path):
with ignoring():
return pytest.importorskip(path)
return None
tz = pytest.importorskip('toolz')
da = import_or_none('dask.array')
db = import_or_none('dask.bag')
dd = import_or_none('dask.dataframe')
np = import_or_none('numpy')
sp = import_or_none('scipy.sparse')
pd = import_or_none('pandas')
def f1(a, b, c=1):
pass
def f2(a, b=1, c=2):
pass
def f3(a):
pass
def test_normalize_function():
assert normalize_function(f2)
assert normalize_function(lambda a: a)
assert (normalize_function(tz.partial(f2, b=2)) ==
normalize_function(tz.partial(f2, b=2)))
assert (normalize_function(tz.partial(f2, b=2)) !=
normalize_function(tz.partial(f2, b=3)))
assert (normalize_function(tz.partial(f1, b=2)) !=
normalize_function(tz.partial(f2, b=2)))
assert (normalize_function(tz.compose(f2, f3)) ==
normalize_function(tz.compose(f2, f3)))
assert (normalize_function(tz.compose(f2, f3)) !=
normalize_function(tz.compose(f2, f1)))
assert normalize_function(tz.curry(f2)) == normalize_function(tz.curry(f2))
assert normalize_function(tz.curry(f2)) != normalize_function(tz.curry(f1))
assert (normalize_function(tz.curry(f2, b=1)) ==
normalize_function(tz.curry(f2, b=1)))
assert (normalize_function(tz.curry(f2, b=1)) !=
normalize_function(tz.curry(f2, b=2)))
def test_tokenize():
a = (1, 2, 3)
assert isinstance(tokenize(a), (str, bytes))
@pytest.mark.skipif('not np')
def test_tokenize_numpy_array_consistent_on_values():
assert (tokenize(np.random.RandomState(1234).random_sample(1000)) ==
tokenize(np.random.RandomState(1234).random_sample(1000)))
@pytest.mark.skipif('not np')
def test_tokenize_numpy_array_supports_uneven_sizes():
tokenize(np.random.random(7).astype(dtype='i2'))
@pytest.mark.skipif('not np')
def test_tokenize_discontiguous_numpy_array():
tokenize(np.random.random(8)[::2])
@pytest.mark.skipif('not np')
def test_tokenize_numpy_datetime():
tokenize(np.array(['2000-01-01T12:00:00'], dtype='M8[ns]'))
@pytest.mark.skipif('not np')
def test_tokenize_numpy_scalar():
assert tokenize(np.array(1.0, dtype='f8')) == tokenize(np.array(1.0, dtype='f8'))
assert (tokenize(np.array([(1, 2)], dtype=[('a', 'i4'), ('b', 'i8')])[0]) ==
tokenize(np.array([(1, 2)], dtype=[('a', 'i4'), ('b', 'i8')])[0]))
@pytest.mark.skipif('not np')
def test_tokenize_numpy_array_on_object_dtype():
assert (tokenize(np.array(['a', 'aa', 'aaa'], dtype=object)) ==
tokenize(np.array(['a', 'aa', 'aaa'], dtype=object)))
assert (tokenize(np.array(['a', None, 'aaa'], dtype=object)) ==
tokenize(np.array(['a', None, 'aaa'], dtype=object)))
assert (tokenize(np.array([(1, 'a'), (1, None), (1, 'aaa')], dtype=object)) ==
tokenize(np.array([(1, 'a'), (1, None), (1, 'aaa')], dtype=object)))
if sys.version_info[0] == 2:
assert (tokenize(np.array([unicode("Rebeca Alón", encoding="utf-8")], dtype=object)) ==
tokenize(np.array([unicode("Rebeca Alón", encoding="utf-8")], dtype=object)))
@pytest.mark.skipif('not np')
def test_tokenize_numpy_memmap():
with tmpfile('.npy') as fn:
x = np.arange(5)
np.save(fn, x)
y = tokenize(np.load(fn, mmap_mode='r'))
with tmpfile('.npy') as fn:
x = np.arange(5)
np.save(fn, x)
z = tokenize(np.load(fn, mmap_mode='r'))
assert y != z
with tmpfile('.npy') as fn:
x = np.random.normal(size=(10, 10))
np.save(fn, x)
mm = np.load(fn, mmap_mode='r')
mm2 = np.load(fn, mmap_mode='r')
a = tokenize(mm[0, :])
b = tokenize(mm[1, :])
c = tokenize(mm[0:3, :])
d = tokenize(mm[:, 0])
assert len(set([a, b, c, d])) == 4
assert tokenize(mm) == tokenize(mm2)
assert tokenize(mm[1, :]) == tokenize(mm2[1, :])
@pytest.mark.skipif('not np')
def test_tokenize_numpy_memmap_no_filename():
# GH 1562:
with tmpfile('.npy') as fn1, tmpfile('.npy') as fn2:
x = np.arange(5)
np.save(fn1, x)
np.save(fn2, x)
a = np.load(fn1, mmap_mode='r')
b = a + a
assert tokenize(b) == tokenize(b)
@pytest.mark.skipif('not np')
def test_tokenize_numpy_ufunc_consistent():
assert tokenize(np.sin) == '02106e2c67daf452fb480d264e0dac21'
assert tokenize(np.cos) == 'c99e52e912e4379882a9a4b387957a0b'
# Make a ufunc that isn't in the numpy namespace. Similar to
# any found in other packages.
inc = np.frompyfunc(lambda x: x + 1, 1, 1)
assert tokenize(inc) == tokenize(inc)
def test_tokenize_partial_func_args_kwargs_consistent():
f = tz.partial(f3, f2, c=f1)
res = normalize_token(f)
sol = (b'cdask.tests.test_base\nf3\np0\n.',
(b'cdask.tests.test_base\nf2\np0\n.',),
(('c', b'cdask.tests.test_base\nf1\np0\n.'),))
assert res == sol
def test_normalize_base():
for i in [1, long(1), 1.1, '1', slice(1, 2, 3)]:
assert normalize_token(i) is i
@pytest.mark.skipif('not pd')
def test_tokenize_pandas():
a = pd.DataFrame({'x': [1, 2, 3], 'y': ['4', 'asd', None]}, index=[1, 2, 3])
b = pd.DataFrame({'x': [1, 2, 3], 'y': ['4', 'asd', None]}, index=[1, 2, 3])
assert tokenize(a) == tokenize(b)
b.index.name = 'foo'
assert tokenize(a) != tokenize(b)
a = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'a']})
b = pd.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'a']})
a['z'] = a.y.astype('category')
assert tokenize(a) != tokenize(b)
b['z'] = a.y.astype('category')
assert tokenize(a) == tokenize(b)
def test_tokenize_kwargs():
assert tokenize(5, x=1) == tokenize(5, x=1)
assert tokenize(5) != tokenize(5, x=1)
assert tokenize(5, x=1) != tokenize(5, x=2)
assert tokenize(5, x=1) != tokenize(5, y=1)
def test_tokenize_same_repr():
class Foo(object):
def __init__(self, x):
self.x = x
def __repr__(self):
return 'a foo'
assert tokenize(Foo(1)) != tokenize(Foo(2))
def test_tokenize_method():
class Foo(object):
def __init__(self, x):
self.x = x
def __dask_tokenize__(self):
return self.x
a, b = Foo(1), Foo(2)
assert tokenize(a) == tokenize(a)
assert tokenize(a) != tokenize(b)
# dispatch takes precedence
before = tokenize(a)
normalize_token.register(Foo, lambda self: self.x + 1)
after = tokenize(a)
assert before != after
@pytest.mark.skipif('not np')
def test_tokenize_sequences():
assert tokenize([1]) != tokenize([2])
assert tokenize([1]) != tokenize((1,))
assert tokenize([1]) == tokenize([1])
x = np.arange(2000) # long enough to drop information in repr
y = np.arange(2000)
y[1000] = 0 # middle isn't printed in repr
assert tokenize([x]) != tokenize([y])
def test_tokenize_dict():
assert tokenize({'x': 1, 1: 'x'}) == tokenize({'x': 1, 1: 'x'})
def test_tokenize_set():
assert tokenize({1, 2, 'x', (1, 'x')}) == tokenize({1, 2, 'x', (1, 'x')})
def test_tokenize_ordered_dict():
with ignoring(ImportError):
from collections import OrderedDict
a = OrderedDict([('a', 1), ('b', 2)])
b = OrderedDict([('a', 1), ('b', 2)])
c = OrderedDict([('b', 2), ('a', 1)])
assert tokenize(a) == tokenize(b)
assert tokenize(a) != tokenize(c)
@pytest.mark.skipif('not np')
def test_tokenize_object_array_with_nans():
a = np.array([u'foo', u'Jos\xe9', np.nan], dtype='O')
assert tokenize(a) == tokenize(a)
@pytest.mark.parametrize('x', [1, True, 'a', b'a', 1.0, 1j, 1.0j,
[], (), {}, None, str, int])
def test_tokenize_base_types(x):
assert tokenize(x) == tokenize(x), x
@pytest.mark.skipif('not np')
def test_tokenize_numpy_matrix():
rng = np.random.RandomState(1234)
a = np.asmatrix(rng.rand(100))
b = a.copy()
assert tokenize(a) == tokenize(b)
b[:10] = 1
assert tokenize(a) != tokenize(b)
@pytest.mark.skipif('not sp')
@pytest.mark.parametrize('cls_name',
('dia', 'bsr', 'coo', 'csc', 'csr', 'dok', 'lil'))
def test_tokenize_dense_sparse_array(cls_name):
rng = np.random.RandomState(1234)
with pytest.warns(None):
# ignore scipy.sparse.SparseEfficiencyWarning
a = sp.rand(10, 10000, random_state=rng).asformat(cls_name)
b = a.copy()
assert tokenize(a) == tokenize(b)
# modifying the data values
if hasattr(b, 'data'):
b.data[:10] = 1
elif cls_name == 'dok':
b[3, 3] = 1
else:
raise ValueError
assert tokenize(a) != tokenize(b)
# modifying the data indices
with pytest.warns(None):
b = a.copy().asformat('coo')
b.row[:10] = np.arange(10)
b = b.asformat(cls_name)
assert tokenize(a) != tokenize(b)
def test_is_dask_collection():
class DummyCollection(object):
def __init__(self, dsk=None):
self.dask = dsk
def __dask_graph__(self):
return self.dask
x = delayed(1) + 2
assert is_dask_collection(x)
assert not is_dask_collection(2)
assert is_dask_collection(DummyCollection({}))
assert not is_dask_collection(DummyCollection())
assert not is_dask_collection(DummyCollection)
def test_unpack_collections():
a = delayed(1) + 5
b = a + 1
c = a + 2
def build(a, b, c, iterator):
return (a, b, # Top-level collections
{'a': a, # dict
a: b, # collections as keys
'b': [1, 2, [b]], # list
'c': 10, # other builtins pass through unchanged
'd': (c, 2), # tuple
'e': {a, 2, 3}}, # set
iterator) # Iterator
args = build(a, b, c, (i for i in [a, b, c]))
collections, repack = unpack_collections(*args)
assert len(collections) == 3
# Replace collections with `'~a'` strings
result = repack(['~a', '~b', '~c'])
sol = build('~a', '~b', '~c', ['~a', '~b', '~c'])
assert result == sol
# traverse=False
collections, repack = unpack_collections(*args, traverse=False)
assert len(collections) == 2 # just a and b
assert repack(collections) == args
# No collections
collections, repack = unpack_collections(1, 2, {'a': 3})
assert not collections
assert repack(collections) == (1, 2, {'a': 3})
# Result that looks like a task
def fail(*args):
raise ValueError("Shouldn't have been called")
collections, repack = unpack_collections(a, (fail, 1), [(fail, 2, 3)],
traverse=False)
repack(collections) # Smoketest task literals
repack([(fail, 1)]) # Smoketest results that look like tasks
class Tuple(DaskMethodsMixin):
__slots__ = ('_dask', '_keys')
__dask_scheduler__ = staticmethod(dask.threaded.get)
def __init__(self, dsk, keys):
self._dask = dsk
self._keys = keys
def __add__(self, other):
if isinstance(other, Tuple):
return Tuple(merge(self._dask, other._dask),
self._keys + other._keys)
return NotImplemented
def __dask_graph__(self):
return self._dask
def __dask_keys__(self):
return self._keys
def __dask_tokenize__(self):
return self._keys
def __dask_postcompute__(self):
return tuple, ()
def __dask_postpersist__(self):
return Tuple, (self._keys,)
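# Descriptive note on the ``Tuple`` helper above (the dask collection protocol):
# ``__dask_graph__`` and ``__dask_keys__`` expose the task graph and its output
# keys, ``__dask_tokenize__`` feeds deterministic hashing, and the two post
# hooks each return a ``(callable, extra_args)`` pair: ``__dask_postcompute__``
# builds the final in-memory result from the computed keys, while
# ``__dask_postpersist__`` rebuilds a ``Tuple`` around the persisted graph.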
def test_custom_collection():
dsk = {'a': 1, 'b': 2}
dsk2 = {'c': (add, 'a', 'b'),
'd': (add, 'c', 1)}
dsk2.update(dsk)
dsk3 = {'e': (add, 'a', 4),
'f': (inc, 'e')}
dsk3.update(dsk)
x = Tuple(dsk, ['a', 'b'])
y = Tuple(dsk2, ['c', 'd'])
z = Tuple(dsk3, ['e', 'f'])
    # __slots__ defined on base mixin class propagates
with pytest.raises(AttributeError):
x.foo = 1
# is_dask_collection
assert is_dask_collection(x)
# tokenize
assert tokenize(x) == tokenize(x)
assert tokenize(x) != tokenize(y)
# compute
assert x.compute() == (1, 2)
assert dask.compute(x, [y, z]) == ((1, 2), [(3, 4), (5, 6)])
t = x + y + z
assert t.compute() == (1, 2, 3, 4, 5, 6)
# persist
t2 = t.persist()
assert isinstance(t2, Tuple)
assert t2._dask == dict(zip('abcdef', range(1, 7)))
assert t2.compute() == (1, 2, 3, 4, 5, 6)
x2, y2, z2 = dask.persist(x, y, z)
t3 = x2 + y2 + z2
assert t2._dask == t3._dask
@pytest.mark.skipif('not db')
def test_compute_no_opt():
    # Bag does `fuse` by default. Test that with `optimize_graph=False` the
    # fusion step doesn't get called. We check this by using a callback to
    # track the keys that are computed.
from dask.callbacks import Callback
b = db.from_sequence(range(100), npartitions=4)
add1 = tz.partial(add, 1)
mul2 = tz.partial(mul, 2)
o = b.map(add1).map(mul2)
# Check that with the kwarg, the optimization doesn't happen
keys = []
with Callback(pretask=lambda key, *args: keys.append(key)):
o.compute(scheduler='single-threaded', optimize_graph=False)
assert len([k for k in keys if '-mul-' in k[0]]) == 4
assert len([k for k in keys if '-add-' in k[0]]) == 4
# Check that without the kwarg, the optimization does happen
keys = []
with Callback(pretask=lambda key, *args: keys.append(key)):
o.compute(scheduler='single-threaded')
# Names of fused tasks have been merged, and the original key is an alias.
# Otherwise, the lengths below would be 4 and 0.
assert len([k for k in keys if '-mul-' in k[0]]) == 8
assert len([k for k in keys if '-add-' in k[0]]) == 4
assert len([k for k in keys if 'add-map-mul' in k[0]]) == 4 # See? Renamed
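# A stripped-down sketch of the key-tracking trick used above, on a raw graph
# dict instead of a Bag (illustrative only; assumes the callback hook also
# fires for the synchronous scheduler):
def _sketch_track_keys():
    from dask.callbacks import Callback
    dsk = {'a': 1, 'b': (inc, 'a'), 'c': (add, 'b', 10)}
    seen = []
    with Callback(pretask=lambda key, *args: seen.append(key)):
        assert dask.local.get_sync(dsk, 'c') == 12
    assert 'b' in seen and 'c' in seen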
@pytest.mark.skipif('not da')
def test_compute_array():
arr = np.arange(100).reshape((10, 10))
darr = da.from_array(arr, chunks=(5, 5))
darr1 = darr + 1
darr2 = darr + 2
out1, out2 = compute(darr1, darr2)
assert np.allclose(out1, arr + 1)
assert np.allclose(out2, arr + 2)
@pytest.mark.skipif('not da')
def test_persist_array():
from dask.array.utils import assert_eq
arr = np.arange(100).reshape((10, 10))
x = da.from_array(arr, chunks=(5, 5))
x = (x + 1) - x.mean(axis=0)
y = x.persist()
assert_eq(x, y)
assert set(y.dask).issubset(x.dask)
assert len(y.dask) == y.npartitions
@pytest.mark.skipif('not dd')
def test_compute_dataframe():
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 5, 3, 3]})
ddf = dd.from_pandas(df, npartitions=2)
ddf1 = ddf.a + 1
ddf2 = ddf.a + ddf.b
out1, out2 = compute(ddf1, ddf2)
pd.util.testing.assert_series_equal(out1, df.a + 1)
pd.util.testing.assert_series_equal(out2, df.a + df.b)
@pytest.mark.skipif('not dd or not da')
def test_compute_array_dataframe():
arr = np.arange(100).reshape((10, 10))
darr = da.from_array(arr, chunks=(5, 5)) + 1
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 5, 3, 3]})
ddf = dd.from_pandas(df, npartitions=2).a + 2
arr_out, df_out = compute(darr, ddf)
assert np.allclose(arr_out, arr + 1)
pd.util.testing.assert_series_equal(df_out, df.a + 2)
@pytest.mark.skipif('not da or not db')
def test_compute_array_bag():
x = da.arange(5, chunks=2)
b = db.from_sequence([1, 2, 3])
pytest.raises(ValueError, lambda: compute(x, b))
xx, bb = compute(x, b, scheduler='single-threaded')
assert np.allclose(xx, np.arange(5))
assert bb == [1, 2, 3]
@pytest.mark.skipif('not da')
def test_compute_with_literal():
x = da.arange(5, chunks=2)
y = 10
xx, yy = compute(x, y)
assert (xx == x.compute()).all()
assert yy == y
assert compute(5) == (5,)
def test_compute_nested():
a = delayed(1) + 5
b = a + 1
c = a + 2
assert (compute({'a': a, 'b': [1, 2, b]}, (c, 2)) ==
({'a': 6, 'b': [1, 2, 7]}, (8, 2)))
res = compute([a, b], c, traverse=False)
assert res[0][0] is a
assert res[0][1] is b
assert res[1] == 8
@pytest.mark.skipif('not da')
@pytest.mark.skipif(sys.flags.optimize,
reason="graphviz exception with Python -OO flag")
def test_visualize():
pytest.importorskip('graphviz')
with tmpdir() as d:
x = da.arange(5, chunks=2)
x.visualize(filename=os.path.join(d, 'mydask'))
assert os.path.exists(os.path.join(d, 'mydask.png'))
x.visualize(filename=os.path.join(d, 'mydask.pdf'))
assert os.path.exists(os.path.join(d, 'mydask.pdf'))
visualize(x, 1, 2, filename=os.path.join(d, 'mydask.png'))
assert os.path.exists(os.path.join(d, 'mydask.png'))
dsk = {'a': 1, 'b': (add, 'a', 2), 'c': (mul, 'a', 1)}
visualize(x, dsk, filename=os.path.join(d, 'mydask.png'))
assert os.path.exists(os.path.join(d, 'mydask.png'))
x = Tuple(dsk, ['a', 'b', 'c'])
visualize(x, filename=os.path.join(d, 'mydask.png'))
assert os.path.exists(os.path.join(d, 'mydask.png'))
@pytest.mark.skipif('not da')
@pytest.mark.skipif(sys.flags.optimize,
reason="graphviz exception with Python -OO flag")
def test_visualize_order():
pytest.importorskip('matplotlib')
x = da.arange(5, chunks=2)
with tmpfile(extension='dot') as fn:
x.visualize(color='order', filename=fn, cmap='RdBu')
with open(fn) as f:
text = f.read()
assert 'color="#' in text
def test_use_cloudpickle_to_tokenize_functions_in__main__():
import sys
from textwrap import dedent
defn = dedent("""
def inc():
return x
""")
__main__ = sys.modules['__main__']
exec(compile(defn, '<test>', 'exec'), __main__.__dict__)
f = __main__.inc
t = normalize_token(f)
assert b'cloudpickle' in t
def inc_to_dec(dsk, keys):
for key in dsk:
if dsk[key][0] == inc:
dsk[key] = (dec,) + dsk[key][1:]
return dsk
def test_optimizations_keyword():
x = dask.delayed(inc)(1)
assert x.compute() == 2
with dask.config.set(optimizations=[inc_to_dec]):
assert x.compute() == 0
assert x.compute() == 2
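# Another rewrite in the same spirit as ``inc_to_dec`` above (an illustrative
# sketch, not registered anywhere): swap ``add`` tasks for ``mul`` tasks in a
# plain graph dict, using the same ``(dsk, keys)`` signature the
# ``optimizations`` hook expects.
def add_to_mul(dsk, keys):
    out = {}
    for key, task in dsk.items():
        if type(task) is tuple and task and task[0] is add:
            task = (mul,) + task[1:]
        out[key] = task
    return out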
def test_optimize():
x = dask.delayed(inc)(1)
y = dask.delayed(inc)(x)
z = x + y
x2, y2, z2, constant = optimize(x, y, z, 1)
assert constant == 1
# Same graphs for each
dsk = dict(x2.dask)
assert dict(y2.dask) == dsk
assert dict(z2.dask) == dsk
# Computationally equivalent
assert dask.compute(x2, y2, z2) == dask.compute(x, y, z)
# Applying optimizations before compute and during compute gives
# same results. Shows optimizations are occurring.
sols = dask.compute(x, y, z, optimizations=[inc_to_dec])
x3, y3, z3 = optimize(x, y, z, optimizations=[inc_to_dec])
assert dask.compute(x3, y3, z3) == sols
# Optimize respects global optimizations as well
with dask.config.set(optimizations=[inc_to_dec]):
x4, y4, z4 = optimize(x, y, z)
for a, b in zip([x3, y3, z3], [x4, y4, z4]):
assert dict(a.dask) == dict(b.dask)
def test_optimize_nested():
a = dask.delayed(inc)(1)
b = dask.delayed(inc)(a)
c = a + b
result = optimize({'a': a, 'b': [1, 2, b]}, (c, 2))
a2 = result[0]['a']
b2 = result[0]['b'][2]
c2 = result[1][0]
assert isinstance(a2, Delayed)
assert isinstance(b2, Delayed)
assert isinstance(c2, Delayed)
assert dict(a2.dask) == dict(b2.dask) == dict(c2.dask)
assert compute(*result) == ({'a': 2, 'b': [1, 2, 3]}, (5, 2))
res = optimize([a, b], c, traverse=False)
assert res[0][0] is a
assert res[0][1] is b
assert res[1].compute() == 5
# TODO: remove after deprecation cycle of `dask.optimize` module is completed
def test_optimize_has_deprecated_module_functions_as_attributes():
import dask.optimize as deprecated_optimize
# Function has method attributes
assert dask.optimize.cull is deprecated_optimize.cull
assert dask.optimize.inline is deprecated_optimize.inline
with pytest.warns(UserWarning):
dask.optimize.cull({}, [])
def test_default_imports():
"""
Startup time: `import dask` should not import too many modules.
"""
code = """if 1:
import dask
import sys
print(sorted(sys.modules))
"""
out = subprocess.check_output([sys.executable, '-c', code])
modules = set(eval(out.decode()))
assert 'dask' in modules
blacklist = ['dask.array', 'dask.dataframe', 'numpy', 'pandas',
'partd', 's3fs', 'distributed']
for mod in blacklist:
assert mod not in modules
def test_persist_literals():
assert persist(1, 2, 3) == (1, 2, 3)
def test_persist_nested():
a = delayed(1) + 5
b = a + 1
c = a + 2
result = persist({'a': a, 'b': [1, 2, b]}, (c, 2))
assert isinstance(result[0]['a'], Delayed)
assert isinstance(result[0]['b'][2], Delayed)
assert isinstance(result[1][0], Delayed)
assert compute(*result) == ({'a': 6, 'b': [1, 2, 7]}, (8, 2))
res = persist([a, b], c, traverse=False)
assert res[0][0] is a
assert res[0][1] is b
assert res[1].compute() == 8
def test_persist_delayed():
x1 = delayed(1)
x2 = delayed(inc)(x1)
x3 = delayed(inc)(x2)
xx, = persist(x3)
assert isinstance(xx, Delayed)
assert xx.key == x3.key
assert len(xx.dask) == 1
assert x3.compute() == xx.compute()
@pytest.mark.skipif('not da or not db')
def test_persist_array_bag():
x = da.arange(5, chunks=2) + 1
b = db.from_sequence([1, 2, 3]).map(inc)
with pytest.raises(ValueError):
persist(x, b)
xx, bb = persist(x, b, scheduler='single-threaded')
assert isinstance(xx, da.Array)
assert isinstance(bb, db.Bag)
assert xx.name == x.name
assert bb.name == b.name
assert len(xx.dask) == xx.npartitions < len(x.dask)
assert len(bb.dask) == bb.npartitions < len(b.dask)
assert np.allclose(x, xx)
assert list(b) == list(bb)
def test_normalize_function_limited_size():
for i in range(1000):
normalize_function(lambda x: x)
assert 50 < len(function_cache) < 600
def test_optimize_globals():
da = pytest.importorskip('dask.array')
db = pytest.importorskip('dask.bag')
x = da.ones(10, chunks=(5,))
def optimize_double(dsk, keys):
return {k: (mul, 2, v) for k, v in dsk.items()}
from dask.array.utils import assert_eq
assert_eq(x + 1, np.ones(10) + 1)
with dask.config.set(array_optimize=optimize_double):
assert_eq(x + 1, (np.ones(10) * 2 + 1) * 2)
assert_eq(x + 1, np.ones(10) + 1)
b = db.range(10, npartitions=2)
with dask.config.set(array_optimize=optimize_double):
xx, bb = dask.compute(x + 1, b.map(inc), scheduler='single-threaded')
assert_eq(xx, (np.ones(10) * 2 + 1) * 2)
def test_optimize_None():
da = pytest.importorskip('dask.array')
x = da.ones(10, chunks=(5,))
y = x[:9][1:8][::2] + 1 # normally these slices would be fused
def my_get(dsk, keys):
assert dsk == dict(y.dask) # but they aren't
return dask.get(dsk, keys)
with dask.config.set(array_optimize=None, get=my_get):
y.compute()
def test_scheduler_keyword():
def schedule(dsk, keys, **kwargs):
return [[123]]
named_schedulers['foo'] = schedule
x = delayed(inc)(1)
try:
assert x.compute() == 2
assert x.compute(scheduler='foo') == 123
with dask.config.set(scheduler='foo'):
assert x.compute() == 123
assert x.compute() == 2
with dask.config.set(scheduler='foo'):
assert x.compute(scheduler='threads') == 2
with pytest.raises(ValueError):
x.compute(get=dask.threaded.get, scheduler='foo')
finally:
del named_schedulers['foo']
def test_warn_get_keyword():
x = delayed(inc)(1)
with warnings.catch_warnings(record=True) as record:
x.compute(get=dask.get)
assert 'scheduler=' in str(record[0].message)
def test_get_scheduler():
assert get_scheduler() is None
assert get_scheduler(scheduler='threads') is dask.threaded.get
assert get_scheduler(scheduler='sync') is dask.local.get_sync
with dask.config.set(scheduler='threads'):
assert get_scheduler(scheduler='threads') is dask.threaded.get
assert get_scheduler() is None
| gpl-3.0 |
massmutual/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
eisoku9618/KalmanFilter_tutorial | LinearKalmanFilter/ConstantVelocity_AKF_with_GPS.py | 1 | 7243 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# http://nbviewer.ipython.org/github/balzer82/Kalman/blob/master/Adaptive-Kalman-Filter-CV.ipynb
# https://github.com/joferkington/oost_paper_code/blob/master/error_ellipse.py
import argparse
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from LinearKalmanFilter import *
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
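# Quick numeric sanity check of the geometry above (illustrative only): for a
# diagonal covariance the eigenvalues are just the per-axis variances, so the
# nstd-sigma ellipse axes come out as 2 * nstd * sigma.
def _demo_cov_ellipse_geometry(nstd=2):
    cov = np.diag([4.0, 1.0])           # sigma_x = 2, sigma_y = 1
    vals, vecs = np.linalg.eigh(cov)
    vals = vals[vals.argsort()[::-1]]   # largest eigenvalue first
    width, height = 2 * nstd * np.sqrt(vals)
    assert np.isclose(width, 2 * nstd * 2.0)
    assert np.isclose(height, 2 * nstd * 1.0)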
def plotData(data_dict, vx, vy, goal_x, goal_y):
f, axarr = plt.subplots(2, 2)
l = len(data_dict["P"])
axarr[0, 0].semilogy(range(l), [p[0, 0] for p in data_dict["P"]], label='$x$')
axarr[0, 0].semilogy(range(l), [p[1, 1] for p in data_dict["P"]], label='$y$')
axarr[0, 0].semilogy(range(l), [p[2, 2] for p in data_dict["P"]], label='$\dot x$')
axarr[0, 0].semilogy(range(l), [p[3, 3] for p in data_dict["P"]], label='$\dot y$')
axarr[0, 0].set_xlabel('Filter Step')
axarr[0, 0].set_title('Uncertainty (Elements from Matrix $P$)')
axarr[0, 0].legend(loc='best')
l = len(data_dict["x"])
axarr[0, 1].plot(range(l), [x[2, 0] for x in data_dict["x"]], label='$\dot x$')
axarr[0, 1].plot(range(l), [x[3, 0] for x in data_dict["x"]], label='$\dot y$')
axarr[0, 1].axhline(vx, color='#999999', label='$\dot x_{real}$')
axarr[0, 1].axhline(vy, color='#999999', label='$\dot y_{real}$')
axarr[0, 1].set_xlabel('Filter Step')
axarr[0, 1].set_title('Estimate (Elements from State Vector $x$)')
axarr[0, 1].legend(loc='best')
axarr[0, 1].set_ylabel('Velocity')
l = len(data_dict["R"])
axarr[1, 0].semilogy(range(l), [r[0, 0] for r in data_dict["R"]], label='$x$')
axarr[1, 0].semilogy(range(l), [r[1, 1] for r in data_dict["R"]], label='$y$')
axarr[1, 0].semilogy(range(l), [r[2, 2] for r in data_dict["R"]], label='$\dot x$')
axarr[1, 0].semilogy(range(l), [r[3, 3] for r in data_dict["R"]], label='$\dot y$')
axarr[1, 0].set_xlabel('Filter Step')
axarr[1, 0].set_ylabel('')
axarr[1, 0].set_title('Measurement Uncertainty $R$ (Adaptive)')
axarr[1, 0].legend(loc='best')
l = len(data_dict["x"])
axarr[1, 1].scatter([x[0, 0] for x in data_dict["x"]], [x[1, 0] for x in data_dict["x"]], s=20, label='State', c='k')
axarr[1, 1].scatter(data_dict["x"][0][0, 0], data_dict["x"][0][1, 0], s=30, label='Start', c='b')
axarr[1, 1].scatter(data_dict["x"][-1][0, 0], data_dict["x"][-1][1, 0], s=30, label='Goal', c='r')
axarr[1, 1].scatter(goal_x, goal_y, s=30, label='True Goal', c='y')
axarr[1, 1].set_xlabel('X')
axarr[1, 1].set_ylabel('Y')
axarr[1, 1].set_title('Position')
axarr[1, 1].legend(loc='lower right')
axarr[1, 1].set_aspect('equal')
for i in range(l):
if i % 50 == 0:
plot_cov_ellipse(data_dict["P"][i][0:2, 0:2], np.array([data_dict["x"][i][0, 0], data_dict["x"][i][1, 0]]), ax=axarr[1, 1],
nstd=400, alpha=0.5, color='green')
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Adaptive Kalman Filter sample')
parser.add_argument('-t', type=float, default=0.5, help='time step')
    parser.add_argument('--sra', type=float, default=2.0, help='true GPS standard deviation of R, whose value is known only to the simulator')
    parser.add_argument('--srb', type=float, default=1.0, help='true Speed Detector standard deviation of R, whose value is known only to the simulator')
    parser.add_argument('--sraf', type=float, default=1.0, help='user-assumed GPS standard deviation of R used by the filter')
    parser.add_argument('--srbf', type=float, default=0.5, help='user-assumed Speed Detector standard deviation of R used by the filter')
parser.add_argument('-N', type=int, default=500, help='number of trials')
parser.add_argument('--vx', type=float, default=40, help='ground truth of velocity x')
parser.add_argument('--vy', type=float, default=20, help='ground truth of velocity y')
parser.add_argument('--noisea', type=float, default=100, help='unexpected observation noise of x')
parser.add_argument('--noiseb', type=float, default=50, help='unexpected observation noise of velocity y')
group = parser.add_mutually_exclusive_group()
group.add_argument('--adaptive', dest='akf', action='store_true', help='use adaptive kalman filter')
group.add_argument('--non-adaptive', dest='akf', action='store_false', help='do not use adaptive kalman filter')
parser.set_defaults(akf=True)
# parameter
dt = parser.parse_args().t
ra = parser.parse_args().sraf**2
rb = parser.parse_args().srbf**2
sv = 1.0
num = parser.parse_args().N
vx = parser.parse_args().vx # in X
vy = parser.parse_args().vy # in Y
# initialize
x0 = np.matrix([[0.0, 0.0, 0, 0]]).T
P0 = 1.0 * np.eye(4)
R = np.matrix([[ra, 0.0, 0.0, 0.0],
[0.0, ra, 0.0, 0.0],
[0.0, 0.0, rb, 0.0],
[0.0, 0.0, 0.0, rb]])
G = np.matrix([[0.5*dt**2],
[0.5*dt**2],
[dt],
[dt]])
Q = G * G.T * sv**2
F = np.matrix([[1.0, 0.0, dt, 0.0],
[0.0, 1.0, 0.0, dt],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
H = 1.0 * np.eye(4)
AKF = LinearKalmanFilter(x0, P0, Q, R, F, H)
# input measurement
mx = np.linspace(0.0 + vx * dt * 1, 0.0 + vx * dt * num, num) + parser.parse_args().sra * np.random.randn(num)
my = np.linspace(0.0 + vy * dt * 1, 0.0 + vy * dt * num, num) + parser.parse_args().sra * np.random.randn(num)
mvx = np.array(vx + parser.parse_args().srb * np.random.randn(num))
mvy = np.array(vy + parser.parse_args().srb * np.random.randn(num))
# some different error somewhere in the measurements
mx[(2 * num/8):(3 * num/8)] = np.linspace(0.0 + vx * dt * (2 * num/8), 0.0 + vx * dt * (3 * num/8), num/8) + parser.parse_args().noisea * np.random.randn(num/8)
mvy[(6* num/8):(7 * num/8)] = np.array(vy + parser.parse_args().noiseb * np.random.randn(num/8))
measurements = np.vstack((mx, my, mvx, mvy))
for i in range(len(measurements[0])):
if parser.parse_args().akf:
n = 10
if i > n:
R = np.matrix([[np.std(measurements[0, (i-n):i] - np.linspace(vx * dt * (i-n), vx * dt * i, n))**2, 0.0, 0.0, 0.0],
[0.0, np.std(measurements[1, (i-n):i] - np.linspace(vy * dt * (i-n), vy * dt * i, n))**2, 0.0, 0.0],
[0.0, 0.0, np.std(measurements[2, (i-n):i])**2, 0.0],
[0.0, 0.0, 0.0, np.std(measurements[3, (i-n):i])**2],
])
AKF.proc(Q, measurements[:, i].reshape(4, 1), R)
plotData(AKF.getData(), vx, vy, vx*dt*num, vy*dt*num)
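# Minimal sketch of the adaptive-R idea used in the loop above (illustrative
# only): the measurement noise variance is re-estimated from the spread of the
# last n detrended measurements instead of staying fixed at the user setting.
def estimate_measurement_variance(samples, expected_trend):
    """Sample variance of (measurement - expected ground-truth trend)."""
    residuals = np.asarray(samples) - np.asarray(expected_trend)
    return np.std(residuals) ** 2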
| mit |
ecatkins/instabilly | project/spotify/neighbors.py | 1 | 2496 | from spotify.models import User, UserProfile, UserGenre, Genre, NearestNeigh, UserSong
from sklearn.neighbors import KNeighborsClassifier
import random
import pdb
def similar_users(user,genre_arrays,neighbors):
    ''' Pass: the active user object, the (user id, genre-proportion) arrays and the number of neighbours to compute
        Returns: a list of [user id, distance] pairs, one per nearest user on the genre-dimensional plot
    '''
# if user.pk == 18:
# pdb.set_trace()
id_array, x_array = genre_arrays
copy_id_array = id_array.copy()
copy_x_array = x_array.copy()
user_id = user.pk
id_index = copy_id_array.index(user_id)
user_array = copy_x_array[id_index]
del copy_id_array[id_index]
del copy_x_array[id_index]
if len(copy_x_array) < neighbors:
neighbors = len(copy_x_array)
y_array = [random.random() for x in range(len(copy_x_array))]
neigh = KNeighborsClassifier(n_neighbors=neighbors)
neigh.fit(copy_x_array, y_array)
result = neigh.kneighbors(user_array,neighbors)
similar_users = [[copy_id_array[result[1][0][x]],result[0][0][x]] for x in range(neighbors)]
return similar_users
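# Illustrative sketch of the same pattern on toy data (the numbers here are
# made up and not part of the app): three users described by two genre
# proportions each, queried for their two nearest neighbours.
def _demo_kneighbors():
    toy_profiles = [[0.9, 0.1], [0.8, 0.2], [0.1, 0.9]]
    toy_labels = [0.1, 0.5, 0.9]          # dummy targets, as in similar_users
    neigh = KNeighborsClassifier(n_neighbors=2)
    neigh.fit(toy_profiles, toy_labels)
    distances, indices = neigh.kneighbors([[0.85, 0.15]], 2)
    return list(zip(indices[0], distances[0]))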
def get_genre_arrays():
all_user_genres = UserGenre.objects.filter(proportion__gt = 0).values('id','proportion', 'user_id', 'genre_id').order_by('user_id')
genre_count = Genre.objects.count()
genres = Genre.objects.all()
genre_count_array = [i for i in range(genre_count)]
all_user_genres_pks = [j.pk for j in genres]
zipped_genre_dictionary = dict(zip(all_user_genres_pks,genre_count_array))
counter = -1
current_user_id = 0
id_array = []
x_array = []
user_array = []
for row in all_user_genres:
if row['user_id'] != current_user_id:
id_array.append(row['user_id'])
x_array.append([0] * genre_count)
counter += 1
current_user_id = row['user_id']
array = x_array[counter]
array[zipped_genre_dictionary[row['genre_id']]] = row['proportion']
return id_array, x_array
def update_users(user_list,neighbors=10):
genre_arrays = get_genre_arrays()
for user in user_list:
user_profile = UserProfile.objects.get(user=user)
existing_neighbors = NearestNeigh.objects.filter(user=user_profile)
existing_neighbors.delete()
neighbor_array = similar_users(user,genre_arrays,neighbors)
for neighbor in neighbor_array:
neighbor_id = neighbor[0]
neighbor_distance = neighbor[1]
neighbor_user_object = User.objects.get(pk=neighbor_id)
new_neighbor = NearestNeigh(user=user_profile,neighbor=neighbor_user_object,distance=neighbor_distance)
new_neighbor.save() | mit |
kazemakase/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
Vimos/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 85 | 5600 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
eigen_solvers = ['dense', 'arpack']
# ----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
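# Illustrative sketch of what barycenter weights mean (numpy only): the weights
# form an affine combination (they sum to one), so the midpoint of two
# neighbours is reconstructed exactly with weights 0.5 and 0.5.
def _demo_barycenter_weights():
    neighbours = np.array([[0., 1.], [2., 0.]])
    weights = np.array([0.5, 0.5])
    assert_array_almost_equal(weights.dot(neighbours), [1.0, 0.5])
    assert_almost_equal(weights.sum(), 1.0)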
# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when a parameter passed to LLE is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
# regression test for #6033
def test_integer_input():
rand = np.random.RandomState(0)
X = rand.randint(0, 100, size=(20, 3))
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10)
clf.fit(X) # this previously raised a TypeError
| bsd-3-clause |
voxlol/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
mwv/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
mhue/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
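# Minimal sketch of the OOB heuristic described in the module docstring
# (an illustrative helper, not used below): the "best" iteration is where the
# cumulative negative OOB improvement reaches its minimum.
def best_n_estimators_from_oob(oob_improvement):
    cumulative_loss = -np.cumsum(oob_improvement)
    return int(np.argmin(cumulative_loss)) + 1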
# Estimate the best n_estimators using cross-validation
cv_score = cv_estimate(3)
# Compute the best n_estimators for the test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
poeticcapybara/pythalesians | pythalesians/backtest/stratanalysis/tradeanalysis.py | 1 | 2343 | __author__ = 'saeedamen'
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
TradeAnalysis
Applies basic trade analysis to a trading strategy (as defined by StrategyTemplate), using PyFolio to generate
standard trading statistics.
"""
pf = None
try:
import pyfolio as pf
except: pass
import datetime
import pandas
from pythalesians.util.loggermanager import LoggerManager
from pythalesians.timeseries.calcs.timeseriestimezone import TimeSeriesTimezone
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
import matplotlib
import matplotlib.pyplot as plt
class TradeAnalysis:
def __init__(self):
self.logger = LoggerManager().getLogger(__name__)
self.DUMP_PATH = 'output_data/' + datetime.date.today().strftime("%Y%m%d") + ' '
self.scale_factor = 3
return
def run_strategy_returns_stats(self, strategy):
"""
run_strategy_returns_stats - Plots useful statistics for the trading strategy (using PyFolio)
Parameters
----------
strategy : StrategyTemplate
defining trading strategy
"""
pnl = strategy.get_strategy_pnl()
tz = TimeSeriesTimezone()
tsc = TimeSeriesCalcs()
# PyFolio assumes UTC time based DataFrames (so force this localisation)
try:
pnl = tz.localise_index_as_UTC(pnl)
except: pass
# TODO for intraday strategy make daily
# convert DataFrame (assumed to have only one column) to Series
pnl = tsc.calculate_returns(pnl)
pnl = pnl[pnl.columns[0]]
fig = pf.create_returns_tear_sheet(pnl, return_fig=True)
try:
            plt.savefig(strategy.DUMP_PATH + "stats.png")
except: pass
plt.show()
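# Hedged usage sketch (assumes a StrategyTemplate-style object exposing
# get_strategy_pnl() and a DUMP_PATH attribute, and that pyfolio imported
# successfully at the top of this module; nothing here runs on import):
def run_tear_sheet_example(strategy):
    ta = TradeAnalysis()
    ta.run_strategy_returns_stats(strategy)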
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/linear_model/ridge.py | 8 | 52900 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
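# Sketch (illustrative only, not used by the solvers): the closed-form normal
# equation above, checked against an explicit inverse on a tiny dense problem.
def _demo_cholesky_ridge():
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    y = rng.randn(5, 1)
    alpha = 2.0
    A = X.T.dot(X) + alpha * np.eye(3)
    w_inverse = np.linalg.inv(A).dot(X.T.dot(y))
    w_solve = linalg.solve(A, X.T.dot(y), sym_pos=True)
    assert np.allclose(w_inverse, w_solve)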
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
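# Sketch (illustrative only): on a small well-conditioned problem the SVD
# shrinkage path above agrees with the normal-equation path in _solve_cholesky.
def _demo_svd_matches_cholesky():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    y = rng.randn(6, 2)
    alpha = np.asarray([1.0, 1.0])
    assert np.allclose(_solve_svd(X, y, alpha), _solve_cholesky(X, y, alpha))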
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
        The last five solvers support both dense and sparse data. However, only
        'sag' and 'saga' support sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For the 'sag' and 'saga' solvers, the default value is
1000.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag'.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
_dtype = [np.float64, np.float32]
# SAG needs X and y columns to be C-contiguous and np.float64
if solver in ['sag', 'saga']:
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=_dtype)
y = check_array(y, dtype=X.dtype, ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver not in ['sag', 'saga']:
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver in ['sag', 'saga']:
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init,
is_saga=solver == 'saga')
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
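# A minimal doctest-style sketch of calling this function directly (it is also
# re-exported as ``sklearn.linear_model.ridge_regression``); the toy data is
# illustrative only:
#
#     >>> import numpy as np
#     >>> from sklearn.linear_model import ridge_regression
#     >>> X = np.array([[0., 0.], [1., 1.], [2., 2.]])
#     >>> y = np.array([0., 1., 2.])
#     >>> coef = ridge_regression(X, y, alpha=1.0, solver='cholesky')
#     >>> coef.shape
#     (2,)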
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
if self.solver in ('sag', 'saga'):
_dtype = np.float64
else:
# all other solvers work at both float precision levels
_dtype = [np.float64, np.float32]
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
tol : float
Precision of the solution.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
        The last five solvers support both dense and sparse data. However,
        only 'sag' and 'saga' support sparse input when `fit_intercept` is
        True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag'.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float
Precision of the solution.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag'.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
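# A short doctest-style usage sketch for the classifier above (toy data,
# illustrative only): labels are binarized to {-1, 1} internally and
# ``predict`` maps the sign of the decision function back to the classes.
#
#     >>> import numpy as np
#     >>> from sklearn.linear_model import RidgeClassifier
#     >>> X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
#     >>> y = np.array([0, 0, 1, 1])
#     >>> clf = RidgeClassifier(alpha=1.0).fit(X, y)
#     >>> clf.predict([[0.9, 0.2]])
#     array([1])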
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y, centered_kernel=True):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
# the following emulates an additional constant regressor
# corresponding to fit_intercept=True
# but this is done only when the features have been centered
if centered_kernel:
K += np.ones_like(K)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We don't construct matrix G, instead compute action on y & diagonal.
"""
w = 1. / (v + alpha)
constant_column = np.var(Q, 0) < 1.e-12
# detect constant columns
w[constant_column] = 0 # cancel the regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y, centered_kernel=True):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
if centered_kernel:
X = np.hstack((X, np.ones((X.shape[0], 1))))
            # to emulate the fit_intercept=True situation, add a column of ones
# Note that by centering, the other columns are orthogonal to that one
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
        # detect columns collinear to the column of ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values. Will be cast to X's dtype if necessary
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
centered_kernel = not sparse.issparse(X) and self.fit_intercept
v, Q, QT_y = _pre_compute(X, y, centered_kernel)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(alpha, y, v, Q, QT_y)
else:
out, c = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values. Will be cast to X's dtype if necessary
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept,
normalize=self.normalize),
parameters, cv=self.cv, scoring=self.scoring)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
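# A doctest-style usage sketch for RidgeCV (toy data, illustrative only); with
# the default ``cv=None`` the efficient generalized cross-validation path of
# ``_RidgeGCV`` above is used:
#
#     >>> import numpy as np
#     >>> from sklearn.linear_model import RidgeCV
#     >>> X = np.array([[0., 0.], [1., 0.], [2., 1.], [3., 1.]])
#     >>> y = np.array([0., 1., 2., 3.])
#     >>> reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
#     >>> reg.alpha_ in (0.1, 1.0, 10.0)
#     True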
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values. Will be cast to X's dtype if necessary
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| mit |
huwenboshi/hess | misc/estimate_lambdagc.py | 1 | 3831 | import numpy as np, numpy.linalg
import pandas as pd
import sys,argparse,os,gzip
eps = 10.0**-8
def main():
# get command line
args = get_command_line()
# load step 1 results
info, eig, prjsq = load_local_hsqg_step1(args.prefix)
    # compute empirical and theoretical values at each locus
nloci = info.shape[0]
empirical = local_quad_form(info,eig,prjsq, args.num_eig, args.min_eigval)
empirical = empirical.sort_values(by=['emp']).reset_index(drop=True)
empirical = empirical[0:int(nloci*args.pct)]
    # estimate the lambda gc
emp = np.reshape(empirical['emp'].values, (-1,1))
theo = empirical['theo'].values
lambda_gc = max(np.linalg.lstsq(emp, theo)[0][0], 1.0)
print lambda_gc
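# The least-squares call above fits the slope of `theo` on `emp`, i.e. it
# solves min_l ||emp * l - theo||_2 with the closed form
#     l = sum(emp * theo) / sum(emp ** 2)
# and then floors the estimate at 1.0 so loci are never deflated. A toy check
# (illustrative numbers only): emp = [2.0, 4.0], theo = [1.0, 2.0] gives
# (2*1 + 4*2) / (2**2 + 4**2) = 0.5, which is floored to a lambda gc of 1.0.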
def load_local_hsqg_step1(prefix):
"""
Load results from step 1 for estimating local SNP-heritability
"""
# check if all files exist
for chrom in xrange(1, 23):
info_f = '{}_chr{}.info.gz'.format(prefix, chrom)
eig_f = '{}_chr{}.eig.gz'.format(prefix, chrom)
prjsq_f = '{}_chr{}.prjsq.gz'.format(prefix, chrom)
if((not os.path.exists(info_f)) or (not os.path.exists(eig_f)) or \
(not os.path.exists(prjsq_f))):
sys.exit(1)
# iterate through chromosomes
info = pd.DataFrame(); eig = []; prjsq = []
for chrom in xrange(1, 23):
# load the information about each locus into memory
info_f = '{}_chr{}.info.gz'.format(prefix, chrom)
info_chr = pd.read_table(info_f, delim_whitespace=True, header=None,
compression='gzip', names=['start', 'stop', 'nsnp', 'rank', 'N'])
info_chr['CHR'] = chrom
info = pd.concat((info, info_chr), axis=0)
# load the eigenvalues and squared projections into memory
eig_f = gzip.open('{}_chr{}.eig.gz'.format(prefix, chrom), 'r')
prjsq_f = gzip.open('{}_chr{}.prjsq.gz'.format(prefix, chrom), 'r')
for line in eig_f:
eig.append(np.array(line.strip().split()).astype(np.float))
for line in prjsq_f:
prjsq.append(np.array(line.strip().split()).astype(np.float))
eig_f.close(); prjsq_f.close()
# reset the index of info
info = info.reset_index(drop=True)
# check if info, eig, and prjsq have the same length
if info.shape[0] != len(eig) or len(eig) != len(prjsq):
sys.exit(1)
# print out debugging info
nloci = info.shape[0]
return (info, eig, prjsq)
def local_quad_form(info, eig, prjsq, max_k, min_eigval):
"""
Compute the quadratic form (beta_gwas' * LD_inv * beta_gwas) at each locus
"""
all_sum = []; all_k = []; all_theo = []
nloci = len(eig)
for i in xrange(nloci):
k = min(max_k, np.where(eig[i] > min_eigval)[0].size)
tmp = np.divide(prjsq[i][0:k], eig[i][0:k]+eps)
all_sum.append(np.sum(tmp))
all_k.append(float(k))
all_theo.append(float(k)/info['N'][i])
return pd.DataFrame({'emp': all_sum, 'k':all_k, 'theo': all_theo})
# get command line
def get_command_line():
parser = argparse.ArgumentParser(description='Estimate lambda gc '
'to re-inflate')
parser.add_argument('--prefix', dest='prefix', type=str, required=True,
help='Local SNP-heritability estimation step 1 output prefix')
parser.add_argument('--num-eig', dest='num_eig', type=int, required=False,
help='Number of eigenvectors to use', default=50)
parser.add_argument('--pct', dest='pct', type=float, required=False,
help='Percent of loci to use to estimate lambda gc', default=0.5)
parser.add_argument('--min-eigval', dest='min_eigval', type=float,
help='Minimum eigenvalue', default=1.0, required=False)
args = parser.parse_args()
return args
if(__name__ == '__main__'):
main()
| gpl-3.0 |
walterreade/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 28 | 5563 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm, by contrast, does not assume any parametric form
for the data distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
Averroes/statsmodels | statsmodels/discrete/tests/test_discrete.py | 19 | 55886 | """
Tests for discrete models
Notes
-----
DECIMAL_3 is used because it seems that there is a loss of precision
in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson
tests.
"""
# pylint: disable-msg=E1101
from statsmodels.compat.python import range
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_allclose,
assert_array_less)
from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit,
Poisson, NegativeBinomial)
from statsmodels.discrete.discrete_margins import _iscount, _isdummy
import statsmodels.api as sm
import statsmodels.formula.api as smf
from nose import SkipTest
from .results.results_discrete import Spector, DiscreteL1, RandHIE, Anes
from statsmodels.tools.sm_exceptions import PerfectSeparationError
try:
import cvxopt
has_cvxopt = True
except ImportError:
has_cvxopt = False
try:
from scipy.optimize import basinhopping
has_basinhopping = True
except ImportError:
has_basinhopping = False
DECIMAL_14 = 14
DECIMAL_10 = 10
DECIMAL_9 = 9
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
"""
res2 should be the test results from RModelWrap
or the results as defined in model_results_data
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5)
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)
def pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
# def test_cov_params(self):
# assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
# DECIMAL_4)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
def test_llnull(self):
assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)
def test_llr_pvalue(self):
assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue,
DECIMAL_4)
def test_normalized_cov_params(self):
pass
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_dof(self):
assert_equal(self.res1.df_model, self.res2.df_model)
assert_equal(self.res1.df_resid, self.res2.df_resid)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.model.predict(self.res1.params),
self.res2.phat, DECIMAL_4)
def test_predict_xb(self):
assert_almost_equal(self.res1.model.predict(self.res1.params,
linear=True),
self.res2.yhat, DECIMAL_4)
def test_loglikeobs(self):
#basic cross check
llobssum = self.res1.model.loglikeobs(self.res1.params).sum()
assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)
def test_jac(self):
#basic cross check
jacsum = self.res1.model.score_obs(self.res1.params).sum(0)
score = self.res1.model.score(self.res1.params)
assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?
class CheckBinaryResults(CheckModelResults):
def test_pred_table(self):
assert_array_equal(self.res1.pred_table(), self.res2.pred_table)
def test_resid_dev(self):
assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,
DECIMAL_4)
def test_resid_generalized(self):
assert_almost_equal(self.res1.resid_generalized,
self.res2.resid_generalized, DECIMAL_4)
def smoke_test_resid_response(self):
self.res1.resid_response
class CheckMargEff(object):
"""
Test marginal effects (margeff) and its options
"""
def test_nodummy_dydxoverall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydx_se, DECIMAL_4)
def test_nodummy_dydxmean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)
def test_nodummy_dydxmedian(self):
me = self.res1.get_margeff(at='median')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)
def test_nodummy_dydxzero(self):
me = self.res1.get_margeff(at='zero')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
def test_nodummy_dyexoverall(self):
me = self.res1.get_margeff(method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyex_se, DECIMAL_4)
def test_nodummy_dyexmean(self):
me = self.res1.get_margeff(at='mean', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)
def test_nodummy_dyexmedian(self):
me = self.res1.get_margeff(at='median', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)
def test_nodummy_dyexzero(self):
me = self.res1.get_margeff(at='zero', method='dyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_dyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)
def test_nodummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydx_se, DECIMAL_4)
def test_nodummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)
def test_nodummy_eydxmedian(self):
me = self.res1.get_margeff(at='median', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)
def test_nodummy_eydxzero(self):
me = self.res1.get_margeff(at='zero', method='eydx')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eydxzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)
def test_nodummy_eyexoverall(self):
me = self.res1.get_margeff(method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyex, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyex_se, DECIMAL_4)
def test_nodummy_eyexmean(self):
me = self.res1.get_margeff(at='mean', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)
def test_nodummy_eyexmedian(self):
me = self.res1.get_margeff(at='median', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)
def test_nodummy_eyexzero(self):
me = self.res1.get_margeff(at='zero', method='eyex')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_eyexzero, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)
def test_dummy_dydxoverall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydx_se, DECIMAL_4)
def test_dummy_dydxmean(self):
me = self.res1.get_margeff(at='mean', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)
def test_dummy_eydxoverall(self):
me = self.res1.get_margeff(method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydx_se, DECIMAL_4)
def test_dummy_eydxmean(self):
me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_eydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)
def test_count_dydxoverall(self):
me = self.res1.get_margeff(count=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydx, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydx_se, DECIMAL_4)
def test_count_dydxmean(self):
me = self.res1.get_margeff(count=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dydxmean_se, DECIMAL_4)
def test_count_dummy_dydxoverall(self):
me = self.res1.get_margeff(count=True, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)
def test_count_dummy_dydxmean(self):
me = self.res1.get_margeff(count=True, dummy=True, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)
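# All of the checks above exercise ``get_margeff`` on fitted discrete-model
# results; a minimal sketch of the call pattern (toy usage with placeholder
# ``endog``/``exog``, not an actual test):
#
#     res = Logit(endog, exog).fit(disp=0)
#     me = res.get_margeff(at='mean', method='dydx', dummy=True)
#     me.margeff, me.margeff_se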
class TestProbitNewton(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
#def test_predict(self):
# assert_almost_equal(self.res1.model.predict(self.res1.params),
# self.res2.predict, DECIMAL_4)
class TestProbitBFGS(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs",
disp=0)
res2 = Spector()
res2.probit()
cls.res2 = res2
class TestProbitNM(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="nm",
disp=0, maxiter=500)
class TestProbitPowell(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="powell",
disp=0, ftol=1e-8)
class TestProbitCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
# fmin_cg fails to converge on some machines - reparameterize
from statsmodels.tools.transform_model import StandardizeTransform
transf = StandardizeTransform(data.exog)
exog_st = transf(data.exog)
res1_st = Probit(data.endog,
exog_st).fit(method="cg", disp=0, maxiter=1000,
gtol=1e-08)
start_params = transf.transform_params(res1_st.params)
assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6)
cls.res1 = Probit(data.endog,
data.exog).fit(start_params=start_params,
method="cg", maxiter=1000,
gtol=1e-05, disp=0)
assert_array_less(cls.res1.mle_retvals['fcalls'], 100)
class TestProbitNCG(CheckBinaryResults):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
cls.res1 = Probit(data.endog, data.exog).fit(method="ncg",
disp=0, avextol=1e-8,
warn_convergence=False)
# converges close enough but warnflag is 2 for precision loss
class TestProbitBasinhopping(CheckBinaryResults):
@classmethod
def setupClass(cls):
if not has_basinhopping:
raise SkipTest("Skipped TestProbitBasinhopping since"
" basinhopping solver is not available")
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.probit()
cls.res2 = res2
fit = Probit(data.endog, data.exog).fit
cls.res1 = fit(method="basinhopping", disp=0, niter=5,
minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})
class CheckLikelihoodModelL1(object):
"""
For testing results generated with L1 regularization
"""
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(
self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_nnz_params(self):
assert_almost_equal(
self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)
def test_aic(self):
assert_almost_equal(
self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(
self.res1.bic, self.res2.bic, DECIMAL_3)
class TestProbitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, trim_mode='auto',
auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.probit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestMNLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
alpha[-1,:] = 0
cls.res1 = mlogit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
acc=1e-10, disp=0)
res2 = DiscreteL1()
res2.mnlogit()
cls.res2 = res2
class TestLogitL1(CheckLikelihoodModelL1):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
res2 = DiscreteL1()
res2.logit()
cls.res2 = res2
def test_cov_params(self):
assert_almost_equal(
self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestCVXOPT(object):
@classmethod
def setupClass(self):
self.data = sm.datasets.spector.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=True)
def test_cvxopt_versus_slsqp(self):
        #Compares results from cvxopt to the standard slsqp
if has_cvxopt:
self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]
res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
trim_mode='auto')
res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(
method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10,
trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)
assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)
else:
raise SkipTest("Skipped test_cvxopt since cvxopt is not available")
class TestSweepAlphaL1(object):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.model = Logit(data.endog, data.exog)
cls.alphas = np.array(
[[0.1, 0.1, 0.1, 0.1],
[0.4, 0.4, 0.5, 0.5],
[0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
cls.res1 = DiscreteL1()
cls.res1.sweep()
def test_sweep_alpha(self):
for i in range(3):
alpha = self.alphas[i, :]
res2 = self.model.fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-10,
trim_mode='off', maxiter=1000)
assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)
class CheckL1Compatability(object):
"""
    Tests compatibility between the l1-regularized and unregularized fits by
    setting alpha such that certain parameters are effectively unregularized,
    while the others are driven to zero and ignored by the model.
"""
def test_params(self):
m = self.m
assert_almost_equal(
self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4)
        # The remaining penalized entries should be close to zero
# handle extra parameter of NegativeBinomial
kvars = self.res_reg.model.exog.shape[1]
assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4)
def test_cov_params(self):
m = self.m
# The restricted cov_params should be equal
assert_almost_equal(
self.res_unreg.cov_params()[:m, :m],
self.res_reg.cov_params()[:m, :m],
DECIMAL_1)
def test_df(self):
assert_equal(self.res_unreg.df_model, self.res_reg.df_model)
assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)
def test_t_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
t_unreg = self.res_unreg.t_test(np.eye(len(self.res_unreg.params)))
t_reg = self.res_reg.t_test(np.eye(kvars + extra))
assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3)
assert_almost_equal(np.nan, t_reg.tvalue[m])
def test_f_test(self):
m = self.m
kvars = self.kvars
# handle extra parameter of NegativeBinomial
extra = getattr(self, 'k_extra', 0)
f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m])
f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m])
assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3)
assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)
def test_bad_r_matrix(self):
kvars = self.kvars
assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )
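# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the selective-penalty idea
# exercised by CheckL1Compatability. A per-parameter alpha of zero leaves a
# coefficient effectively unregularized, while a large alpha drives it toward
# (numerically) zero. This relies on the same module-level imports (sm, np,
# Logit) used by the surrounding tests; the dataset and alpha values are
# chosen purely for illustration.
def _example_selective_l1_fit():
    data = sm.datasets.spector.load()
    exog = sm.add_constant(data.exog, prepend=True)
    alpha = np.zeros(exog.shape[1])
    alpha[-1] = 10.  # heavily penalize only the last column
    # the unpenalized columns are fit essentially as in the plain model,
    # while the penalized column is trimmed toward zero
    return Logit(data.endog, exog).fit_regularized(
        method="l1", alpha=alpha, disp=0, trim_mode='auto')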
class TestPoissonL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
        # Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
class TestNegativeBinomialL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)
rand_exog = sm.add_constant(rand_exog_st, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI)
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
        # Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)
alpha[:cls.m] = 0
alpha[-1] = 0 # don't penalize alpha
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog)
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
cls.k_extra = 1 # 1 extra parameter in nb2
class TestNegativeBinomialGeoL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 10 # Number of variables
cls.m = 7 # Number of unregularized parameters
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
rand_exog = sm.add_constant(rand_exog, prepend=True)
# Drop some columns and do an unregularized fit
exog_no_PSI = rand_exog[:, :cls.m]
mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI,
loglike_method='geometric')
cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
# Do a regularized fit with alpha, effectively dropping the last columns
alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
alpha[:cls.m] = 0
mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog,
loglike_method='geometric')
cls.res_reg = mod_reg.fit_regularized(
method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
trim_mode='auto')
assert_equal(mod_reg.loglike_method, 'geometric')
class TestLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
# Do a regularized fit with alpha, effectively dropping the last column
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class TestMNLogitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(
disp=0, tol=1e-15, method='bfgs', maxiter=1000)
def test_t_test(self):
m = self.m
kvars = self.kvars
t_unreg = self.res_unreg.t_test(np.eye(m))
t_reg = self.res_reg.t_test(np.eye(kvars))
assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
assert_almost_equal(np.nan, t_reg.sd[m])
assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3)
def test_f_test(self):
raise SkipTest("Skipped test_f_test for MNLogit")
class TestProbitL1Compatability(CheckL1Compatability):
@classmethod
def setupClass(cls):
cls.kvars = 4 # Number of variables
cls.m = 3 # Number of unregularized parameters
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
alpha = np.array([0, 0, 0, 10])
cls.res_reg = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
exog_no_PSI = data.exog[:, :cls.m]
cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class CompareL1(object):
"""
For checking results for l1 regularization.
Assumes self.res1 and self.res2 are two legitimate models to be compared.
"""
def test_basic_results(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4)
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4)
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
class CompareL11D(CompareL1):
"""
Check t and f tests. This only works for 1-d results
"""
def test_tests(self):
restrictmat = np.eye(len(self.res1.params.ravel()))
assert_almost_equal(self.res1.t_test(restrictmat).pvalue,
self.res2.t_test(restrictmat).pvalue, DECIMAL_4)
assert_almost_equal(self.res1.f_test(restrictmat).pvalue,
self.res2.f_test(restrictmat).pvalue, DECIMAL_4)
class TestL1AlphaZeroLogit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Logit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroProbit(CompareL11D):
"""
Compares l1 model with alpha = 0 to the unregularized model.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=True)
cls.res1 = Probit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroMNLogit(CompareL1):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(
method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
trim_mode='auto', auto_trim_tol=0.01)
cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15,
method='bfgs',
maxiter=1000)
class TestLogitNewton(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0)
res2 = Spector()
res2.logit()
cls.res2 = res2
def test_resid_pearson(self):
assert_almost_equal(self.res1.resid_pearson,
self.res2.resid_pearson, 5)
def test_nodummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)
def test_nodummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)
def test_dummy_exog1(self):
me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog1, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog1_se, DECIMAL_4)
def test_dummy_exog2(self):
me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',
dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_atexog2, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_atexog2_se, DECIMAL_4)
class TestLogitBFGS(CheckBinaryResults, CheckMargEff):
@classmethod
def setupClass(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
res2 = Spector()
res2.logit()
cls.res2 = res2
cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs", disp=0)
class TestPoissonNewton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)
res2 = RandHIE()
res2.poisson()
cls.res2 = res2
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff,
self.res2.margeff_nodummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_nodummy_overall_se, DECIMAL_4)
def test_margeff_dummy_overall(self):
me = self.res1.get_margeff(dummy=True)
assert_almost_equal(me.margeff,
self.res2.margeff_dummy_overall, DECIMAL_4)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dummy_overall_se, DECIMAL_4)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, 2)
def test_predict_prob(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
probs_res = np.loadtxt(os.path.join(cur_dir, "results",
"predict_prob_poisson.csv"), delimiter=",")
# just check the first 100 obs. vs R to save memory
probs = self.res1.predict_prob()[:100]
assert_almost_equal(probs, probs_res, 8)
class TestNegativeBinomialNB2Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1Newton(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(
method="newton",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialNB2BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(
method='bfgs', disp=0,
maxiter=1000)
res2 = RandHIE()
res2.negativebinomial_nb2_bfgs()
cls.res2 = res2
def test_jac(self):
pass
    #NOTE: The bse is much closer in precision to Stata
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_alpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
DECIMAL_4)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
DECIMAL_2)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10],
np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10],
self.res2.fittedvalues[:10], DECIMAL_3)
def no_info(self):
pass
test_jac = no_info
class TestNegativeBinomialNB1BFGS(CheckModelResults):
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs",
maxiter=100,
disp=0)
res2 = RandHIE()
res2.negativebinomial_nb1_bfgs()
cls.res2 = res2
def test_zstat(self):
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def test_lnalpha(self):
self.res1.bse # attaches alpha_std_err
assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
assert_almost_equal(self.res1.lnalpha_std_err,
self.res2.lnalpha_std_err, DECIMAL_4)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
def test_conf_int(self):
# the bse for alpha is not high precision from the hessian
# approximation
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
DECIMAL_2)
def test_jac(self):
pass
def test_predict(self):
pass
def test_predict_xb(self):
pass
class TestNegativeBinomialGeometricBFGS(CheckModelResults):
"""
    Cannot find another implementation of the geometric model to cross-check
    results against, so we only test fitted values. The geometric model has
    fewer parameters than nb1 and nb2, and we want to make sure that
    predict(), i.e. np.dot(exog, params), works.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0)
res2 = RandHIE()
res2.negativebinomial_geometric_bfgs()
cls.res2 = res2
# the following are regression tests, could be inherited instead
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
def test_conf_int(self):
assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_jac(self):
pass
def test_predict(self):
assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_predict_xb(self):
assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3)
def test_zstat(self): # Low precision because Z vs. t
assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
def no_info(self):
pass
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)
def test_llr(self):
assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
test_jac = no_info
class CheckMNLogitBaseZero(CheckModelResults):
def test_margeff_overall(self):
me = self.res1.get_margeff()
assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)
def test_margeff_mean(self):
me = self.res1.get_margeff(at='mean')
assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)
assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)
def test_margeff_dummy(self):
data = self.data
vote = data.data['vote']
exog = np.column_stack((data.exog, vote))
exog = sm.add_constant(exog, prepend=False)
res = MNLogit(data.endog, exog).fit(method="newton", disp=0)
me = res.get_margeff(dummy=True)
assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,
6)
assert_almost_equal(me.margeff_se,
self.res2.margeff_dydx_dummy_overall_se, 6)
me = res.get_margeff(dummy=True, method="eydx")
assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,
5)
assert_almost_equal(me.margeff_se,
self.res2.margeff_eydx_dummy_overall_se, 6)
def test_j(self):
assert_equal(self.res1.model.J, self.res2.J)
def test_k(self):
assert_equal(self.res1.model.K, self.res2.K)
def test_endog_names(self):
assert_equal(self.res1._get_endog_name(None,None)[1],
['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])
def test_pred_table(self):
# fitted results taken from gretl
pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,
1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,
1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,
1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,
0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,
0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,
0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,
5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,
6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,
6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,
0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,
0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,
1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,
1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,
1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,
0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,
6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,
1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,
0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,
0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,
0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,
5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,
6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5,
0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,
6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,
0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,
0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,
6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,
0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,
1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,
1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,
0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,
5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,
0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,
6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,
0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,
6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,
1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,
6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,
6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,
5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,
6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,
5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,
5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]
assert_array_equal(self.res1.predict().argmax(1), pred)
        # the column sums of the pred_table should equal the predicted class counts
assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))
# note this is just a regression test, gretl doesn't have a prediction
# table
pred = [[ 126., 41., 2., 0., 0., 12., 19.],
[ 77., 73., 3., 0., 0., 15., 12.],
[ 37., 43., 2., 0., 0., 19., 7.],
[ 12., 9., 1., 0., 0., 9., 6.],
[ 19., 10., 2., 0., 0., 20., 43.],
[ 22., 25., 1., 0., 0., 31., 71.],
[ 9., 7., 1., 0., 0., 18., 140.]]
assert_array_equal(self.res1.pred_table(), pred)
def test_resid(self):
assert_array_equal(self.res1.resid_misclassified, self.res2.resid)
class TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
class TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero):
@classmethod
def setupClass(cls):
data = sm.datasets.anes96.load()
cls.data = data
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
mymodel = MNLogit(data.endog, exog)
cls.res1 = mymodel.fit(method="lbfgs", disp=0, maxiter=50000,
#m=12, pgtol=1e-7, factr=1e3, # 5 failures
#m=20, pgtol=1e-8, factr=1e2, # 3 failures
#m=30, pgtol=1e-9, factr=1e1, # 1 failure
m=40, pgtol=1e-10, factr=5e0,
loglike_and_score=mymodel.loglike_and_score)
res2 = Anes()
res2.mnlogit_basezero()
cls.res2 = res2
def test_perfect_prediction():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
iris_dir = os.path.abspath(iris_dir)
iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",",
skip_header=1)
y = iris[:,-1]
X = iris[:,:-1]
X = X[y != 2]
y = y[y != 2]
X = sm.add_constant(X, prepend=True)
mod = Logit(y,X)
assert_raises(PerfectSeparationError, mod.fit, maxiter=1000)
#turn off raise PerfectSeparationError
mod.raise_on_perfect_prediction = False
# this will raise if you set maxiter high enough with a singular matrix
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
mod.fit(disp=False, maxiter=50) # should not raise but does warn
def test_poisson_predict():
#GH: 175, make sure poisson predict works without offset and exposure
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=True)
res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)
pred1 = res.predict()
pred2 = res.predict(exog)
assert_almost_equal(pred1, pred2)
    #extra options
pred3 = res.predict(exog, offset=0, exposure=1)
assert_almost_equal(pred1, pred3)
pred3 = res.predict(exog, offset=0, exposure=2)
assert_almost_equal(2*pred1, pred3)
pred3 = res.predict(exog, offset=np.log(2), exposure=1)
assert_almost_equal(2*pred1, pred3)
def test_poisson_newton():
#GH: 24, Newton doesn't work well sometimes
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x, prepend=True)
y_count = np.random.poisson(np.exp(x.sum(1)))
mod = sm.Poisson(y_count, x)
from pandas.util.testing import assert_produces_warning
# this is not thread-safe
with assert_produces_warning():
res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)
assert_(not res.mle_retvals['converged'])
def test_issue_339():
# make sure MNLogit summary works for J != K.
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
# strip the header from the test
smry = "\n".join(res1.summary().as_text().split('\n')[9:])
cur_dir = os.path.dirname(os.path.abspath(__file__))
test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')
test_case = open(test_case_file, 'r').read()
np.testing.assert_equal(smry, test_case[:-1])
def test_issue_341():
data = sm.datasets.anes96.load()
exog = data.exog
# leave out last exog column
exog = exog[:,:-1]
exog = sm.add_constant(exog, prepend=True)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
x = exog[0]
np.testing.assert_equal(res1.predict(x).shape, (1,7))
np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))
def test_iscount():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(1, 10, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _iscount(X)
assert_equal(count_ind, [2, 6])
def test_isdummy():
X = np.random.random((50, 10))
X[:,2] = np.random.randint(1, 10, size=50)
X[:,6] = np.random.randint(0, 2, size=50)
X[:,4] = np.random.randint(0, 2, size=50)
X[:,1] = np.random.randint(-10, 10, size=50) # not integers
count_ind = _isdummy(X)
assert_equal(count_ind, [4, 6])
def test_non_binary():
y = [1, 2, 1, 2, 1, 2]
X = np.random.randn(6, 2)
np.testing.assert_raises(ValueError, Logit, y, X)
def test_mnlogit_factor():
dta = sm.datasets.anes96.load_pandas()
dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG')))
dta.exog['constant'] = 1
mod = sm.MNLogit(dta.endog, dta.exog)
res = mod.fit(disp=0)
# smoke tests
params = res.params
summary = res.summary()
# with patsy
del dta.exog['constant']
mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data)
res2 = mod.fit(disp=0)
res2.params
summary = res2.summary()
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
import pandas as pd
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
# should work
mod1 = smf.poisson('Foo ~ Bar', data=df, exposure=df['exposure'])
assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray')
# make sure this raises
exposure = pd.Series(np.random.randn(5))
assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],
exposure=exposure)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
exit=False)
| bsd-3-clause |
terkkila/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
tribhuvanesh/vpa | vispr/tools/scripts/evaluate.py | 1 | 5206 | #!/usr/bin/python
"""This is a short description.
Replace this with a more detailed description of what this file contains.
"""
import json
import time
import pickle
import sys
import csv
import argparse
import os
import os.path as osp
import shutil
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.misc import imread
from sklearn.metrics import average_precision_score
from vispr import DS_ROOT
from vispr.tools.common.utils import *
__author__ = "Tribhuvanesh Orekondy"
__maintainer__ = "Tribhuvanesh Orekondy"
__email__ = "orekondy@mpi-inf.mpg.de"
__status__ = "Development"
def main():
parser = argparse.ArgumentParser()
parser.add_argument("pred_file", type=str, help="File path to list of predictions")
parser.add_argument("-c", "--class_scores", type=str, default=None, help="Path to write class-specific APs")
parser.add_argument("-q", "--qual", type=str, default=None, help="Path to write qualitative results")
args = parser.parse_args()
params = vars(args)
# Load Attributes --------------------------------------------------------------------------------------------------
attr_id_to_name, attr_id_to_idx = load_attributes()
idx_to_attr_id = {v: k for k, v in attr_id_to_idx.iteritems()}
n_attr = len(attr_id_to_idx)
# Load predictions -------------------------------------------------------------------------------------------------
# Construct a list of dicts containing: GT labels, Prediction probabilities, Image path
pred_list = []
with open(params['pred_file'], 'r') as f:
for _line in f:
line = _line.strip()
dct = json.loads(line)
pred_entry = dict()
pred_entry['pred_probs'] = np.asarray(dct['pred_probs'], dtype=float)
# Read image_path and gt_labels from annotation
anno_path = dct['anno_path'] if osp.exists(dct['anno_path']) else osp.join(DS_ROOT, dct['anno_path'])
with open(anno_path) as jf:
anno = json.load(jf)
# Get the list of attributes this corresponds to
attr_set = set(anno['labels'])
attr_vec = labels_to_vec(attr_set, attr_id_to_idx)
pred_entry['image_path'] = anno['image_path']
pred_entry['gt_labels'] = attr_vec
pred_entry['anno_path'] = dct['anno_path']
pred_list.append(pred_entry)
# Convert to matrix ------------------------------------------------------------------------------------------------
# Create a NxM matrix. Each row represents the class-probabilities for the M classes.
# In case of GT, they are 1-hot encoded
gt_mat = np.array([d['gt_labels'] for d in pred_list])
pred_probs_mat = np.array([d['pred_probs'] for d in pred_list])
# Drop examples where gt contains no relevant attributes (when testing on a partial set)
# non_empty_gt_idx = np.where(np.sum(gt_mat, axis=1) > 0)[0]
# pred_probs_mat = pred_probs_mat[non_empty_gt_idx, :]
# gt_mat = gt_mat[non_empty_gt_idx, :]
# Evaluate Overall Attribute Prediction ----------------------------------------------------------------------------
n_examples, n_labels = gt_mat.shape
print '# Examples = ', n_examples
print '# Labels = ', n_labels
print 'Macro MAP = {:.2f}'.format(100 * average_precision_score(gt_mat, pred_probs_mat, average='macro'))
if params['class_scores'] is not None:
cmap_stats = average_precision_score(gt_mat, pred_probs_mat, average=None)
with open(params['class_scores'], 'w') as wf:
wf.write('\t'.join(['attribute_id', 'attribute_name', 'num_occurrences', 'ap']) + '\n')
for idx in range(n_labels):
attr_id = idx_to_attr_id[idx]
attr_name = attr_id_to_name[attr_id]
attr_occurrences = np.sum(gt_mat, axis=0)[idx]
ap = cmap_stats[idx]
wf.write('{}\t{}\t{}\t{}\n'.format(attr_id, attr_name, attr_occurrences, ap*100.0))
if params['qual'] is not None:
if not osp.exists(params['qual']):
print '{} does not exist. Creating it ...'.format(params['qual'])
os.mkdir(params['qual'])
for pred in pred_list:
image_path = pred['image_path']
im = Image.open(image_path)
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(20, 15))
ax = ax1
ax.imshow(im)
ax.axis('off')
ax = ax2
text_str = ''
pred_probs = pred['pred_probs']
top_10_inds = np.argsort(-pred_probs)[:10]
for aidx in top_10_inds:
text_str += '{:<30} {:.3f}\n'.format(idx_to_attr_id[aidx], pred_probs[aidx])
ax.set_xlim(xmin=0, xmax=1)
ax.set_ylim(ymin=0, ymax=1)
ax.text(0.0, 0.5, text_str, fontsize='xx-large')
ax.axis('off')
plt.tight_layout()
_, im_name = osp.split(image_path)
out_path = osp.join(params['qual'], im_name)
plt.savefig(out_path, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main() | apache-2.0 |
jkarnows/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
SU-ECE-17-7/ibeis | _broken/_old_qt_hs_matcher/user_dialogs.py | 1 | 7263 | # -*- coding: utf-8 -*-
"""
Deprecate
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
print, rrr, profile = ut.inject2(__name__, '[user_dialogs]')
def convert_name_suggestion_to_aids(ibs, choicetup, name_suggest_tup):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.user_dialogs import * # NOQA
>>> import ibeis
>>> # build test data
>>> ibs = ibeis.opendb('testdb1')
>>> comp_aids = [2, 3, 4]
>>> comp_names = ['fred', 'sue', 'alice']
>>> chosen_names = ['fred']
>>> # execute function
>>> result = convert_name_suggestion_to_aids(ibs, choicetup, name_suggest_tup)
>>> # verify results
>>> print(result)
"""
num_top = 3
autoname_msg, chosen_names, name_confidence = name_suggest_tup
comp_aids_all = ut.get_list_column(choicetup.sorted_aids, 0)
comp_aids = ut.listclip(comp_aids_all, num_top)
comp_names = ibs.get_annot_names(comp_aids)
issuggested = ut.list_cover(comp_names, chosen_names)
suggest_aids = ut.compress(comp_aids, issuggested)
return comp_aids, suggest_aids
def wait_for_user_name_decision(ibs, cm, qreq_, choicetup, name_suggest_tup, incinfo=None):
r"""
    Prompts the user for input.
    Hooks into some method of getting user input for names.
Args:
ibs (IBEISController):
cm (QueryResult): object of feature correspondences and scores
autoname_func (function):
CommandLine:
python -m ibeis.algo.hots.user_dialogs --test-wait_for_user_name_decision --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.user_dialogs import * # NOQA
>>> import ibeis
>>> # build test data
>>> ibs = ibeis.opendb('testdb1')
>>> qaids = [1]
>>> daids = [2, 3, 4, 5]
>>> cm, qreq_ = ibs.query_chips(qaids, daids, cfgdict=dict(),
>>> return_request=True)[0]
>>> choicetup = '?'
>>> name_suggest_tup = '?'
>>> incinfo = None
>>> # execute function
>>> result = wait_for_user_name_decision(ibs, cm, qreq_, choicetup,
>>> name_suggest_tup, incinfo)
>>> # verify results
>>> print(result)
>>> ut.show_if_requested()
"""
import plottool as pt
if cm is None:
print('WARNING: chipmatch is None')
new_mplshow = True and cm is not None
mplshowtop = False and cm is not None
qtinspect = False and cm is not None
if new_mplshow:
from ibeis.viz.interact import interact_query_decision
print('Showing matplotlib window')
# convert name choices into data for gui
comp_aids, suggest_aids = convert_name_suggestion_to_aids(ibs,
choicetup,
name_suggest_tup)
# Update names tree callback
# Let the harness do these callbacks
#backend_callback = incinfo.get('backend_callback', None)
#update_callback = incinfo.get('update_callback', None)
name_decision_callback = incinfo['name_decision_callback']
progress_current = incinfo['count']
progress_total = incinfo['nTotal']
fnum = incinfo['fnum']
qvi = interact_query_decision.QueryVerificationInteraction(
qreq_, cm, comp_aids, suggest_aids,
name_decision_callback=name_decision_callback,
#update_callback=update_callback,
#backend_callback=backend_callback,
progress_current=progress_current, progress_total=progress_total,
fnum=fnum)
qvi.fig.show()
pt.bring_to_front(qvi.fig)
if mplshowtop:
import guitool
fnum = 513
pt.figure(fnum=fnum, pnum=(2, 3, 1), doclf=True, docla=True)
fig = cm.ishow_top(qreq_, fnum=fnum, in_image=False, annot_mode=0,
sidebyside=False, show_query=True)
fig.show()
#fig.canvas.raise_()
#from plottool import fig_presenter
#fig_presenter.bring_to_front(fig)
newname = ibs.make_next_name()
newname_prefix = 'New Name:\n'
        # FIXME or remove
name = None
#if chosen_names is None:
# name = newname_prefix + newname
aid_list = ut.get_list_column(choicetup.sorted_aids, 0)
name_options = ibs.get_annot_names(aid_list) + [newname_prefix + newname]
msg = 'Decide on query name. System suggests; ' + str(name)
title = 'name decision'
options = name_options[::-1]
user_chosen_name = guitool.user_option(None, msg, title, options) # NOQA
if user_chosen_name is None:
raise AssertionError('User Canceled Query')
user_chosen_name = user_chosen_name.replace(newname_prefix, '')
# TODO: Make the old interface use the correct sorted_aids format
#name_decision_callback(user_chosen_name)
if qtinspect:
print('Showing qt inspect window')
qres_wgt = cm.qt_inspect_gui(qreq_)
qres_wgt.show()
qres_wgt.raise_()
#if qreq_ is not None:
# if qreq_.normalizer is None:
# print('normalizer is None!!')
# else:
# qreq_.normalizer.visualize(update=False, fnum=2)
# Prompt the user (this could be swaped out with a qt or web interface)
#if qtinspect:
# qres_wgt.close()
#return user_chosen_name
def wait_for_user_exemplar_decision(autoexemplar_msg, exemplar_decision,
exemplar_condience, incinfo=None):
r""" hooks into to some method of getting user input for exemplars
TODO: really good interface
Args:
autoexemplar_msg (?):
exemplar_decision (?):
exemplar_condience (?):
Returns:
?: True
CommandLine:
python -m ibeis.algo.hots.automated_matcher --test-get_user_exemplar_decision
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.algo.hots.automated_matcher import * # NOQA
>>> import ibeis # NOQA
>>> # build test data
>>> autoexemplar_msg = '?'
>>> exemplar_decision = '?'
>>> exemplar_condience = '?'
>>> get_user_exemplar_decision(autoexemplar_msg, exemplar_decision,
>>> exemplar_condience)
>>> # verify results
>>> result = str(True)
>>> print(result)
"""
import guitool
options = ['No', 'Yes']
msg = 'Add query as new exemplar?. IBEIS suggests: ' + options[exemplar_decision]
title = 'exemplar decision'
responce = guitool.user_option(None, msg, title, options) # NOQA
if responce is None:
raise AssertionError('User Canceled Query')
if responce == 'Yes':
exemplar_decision = True
elif responce == 'No':
exemplar_decision = False
else:
raise AssertionError('answer yes or no')
# TODO CALLBACK HERE
exemplar_decision_callback = incinfo['exemplar_decision_callback']
exemplar_decision_callback(exemplar_decision)
| apache-2.0 |
elkingtonmcb/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
grahesh/Stock-Market-Event-Analysis | Examples/Event Analysis/Half-Yearly End/Half_Year_End_Analysis.py | 1 | 4522 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 03 10:16:39 2013
@author: Grahesh
"""
import pandas
from qstkutil import DataAccess as da
import numpy as np
import math
import copy
import qstkutil.qsdateutil as du
import datetime as dt
import qstkutil.DataAccess as da
import qstkutil.tsutil as tsu
import qstkstudy.EventProfiler as ep
"""
Accepts a list of symbols along with start and end dates.
Returns the Event Matrix, which is a pandas DataMatrix.
The Event Matrix has the following structure:
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit (positively confirms the event occurrence)
"""
# Get the data from the data store
storename = "NSEData" # get data from our daily prices source
# Available field names: open, high, low, close, actual_close, volume
closefield = "close"
volumefield = "volume"
window = 10
def getHalfYearEndDates(timestamps):
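    # Returns the last trading day before April and before October of each
    # year covered by `timestamps`, i.e. the half-year end dates.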
newTS=[]
tempYear=timestamps[0].year
flag=1
for x in range(0, len(timestamps)-1):
if(timestamps[x].year==tempYear):
if(timestamps[x].month==4 and flag==1):
newTS.append(timestamps[x-1])
flag=0
if(timestamps[x].month==10):
newTS.append(timestamps[x-1])
tempYear=timestamps[x].year+1
flag=1
return newTS
def findEvents(symbols, startday,endday, marketSymbol,verbose=False):
# Reading the Data for the list of Symbols.
timeofday=dt.timedelta(hours=16)
timestamps = du.getNSEdays(startday,endday,timeofday)
endOfHalfYear=getHalfYearEndDates(timestamps)
dataobj = da.DataAccess('NSEData')
if verbose:
print __name__ + " reading data"
# Reading the Data
close = dataobj.get_data(timestamps, symbols, closefield)
# Completing the Data - Removing the NaN values from the Matrix
close = (close.fillna(method='ffill')).fillna(method='backfill')
# Calculating Daily Returns for the Market
tsu.returnize0(close.values)
# Calculating the Returns of the Stock Relative to the Market
    # So if a stock went up 5% and the market rose 3%, then the return relative to the market is 2%
mktneutDM = close - close[marketSymbol]
np_eventmat = copy.deepcopy(mktneutDM)
for sym in symbols:
for time in timestamps:
np_eventmat[sym][time]=np.NAN
if verbose:
print __name__ + " finding events"
# Generating the Event Matrix
# Event described is : Analyzing half year events for given stocks.
for symbol in symbols:
for i in endOfHalfYear:
np_eventmat[symbol][i] = 1.0 #overwriting by the bit, marking the event
return np_eventmat
#################################################
################ MAIN CODE ######################
#################################################
symbols = np.loadtxt('NSE500port.csv',dtype='S13',comments='#', skiprows=1)
# You might get a message about some files being missing, don't worry about it.
#symbols =['SPY','BFRE','ATCS','RSERF','GDNEF','LAST','ATTUF','JBFCF','CYVA','SPF','XPO','EHECF','TEMO','AOLS','CSNT','REMI','GLRP','AIFLY','BEE','DJRT','CHSTF','AICAF']
#symbols=['NSE','3MINDIA.NS','AARTIIND.NS','ABAN.NS','ABB.NS','ABGSHIP.NS','ABIRLANUV.NS','ACC.NS','ADANIENT.NS','ADANIPORT.NS','ADANIPOWE.NS','ADVANTA.NS','ALLCARGO.NS','AIAENG.NS','AIL.NS','AZKOINDIA.NS']
startday = dt.datetime(2011,1,1)
endday = dt.datetime(2012,1,1)
eventMatrix = findEvents(symbols,startday,endday,marketSymbol='NSE500',verbose=True)
eventMatrix.to_csv('eventmatrix.csv', sep=',')
eventProfiler = ep.EventProfiler(eventMatrix,startday,endday,lookback_days=20,lookforward_days=20,verbose=True)
eventProfiler.study(filename="HalfYearEventStudy.jpg",plotErrorBars=True,plotMarketNeutral=True,plotEvents=False,marketSymbol='NSE500')
| bsd-3-clause |
BhallaLab/moose-examples | tutorials/Electrophys/ephys6_div_normalization.py | 2 | 6736 | ########################################################################
# This example demonstrates divisive normalization
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3.
########################################################################
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.widgets import Slider, Button, RadioButtons
import numpy as np
import warnings
import moose
import rdesigneur as rd
lines = []
tplot = ""
axes = []
sliders = []
spikingDistrib = []
RM = 1.0
RA = 1.0
CM = 0.01
dia = 10e-6
runtime = 0.08
elecPlotDt = 0.0002
sliderMode = "Gbar"
gluGbar = 1.0
gabaGbar = 0.5
K_A_Gbar = 2.0
gluOnset = 20.0e-3
gabaOnset = 40.0e-3
inputFreq = 100.0
inputDuration = 0.01
printOutput = False
makeMovie = False
frameNum = 0
fname = "movie/frame"
ttext = ""
rec = []
def setGluGbar( val ):
global gluGbar
gluGbar = val
updateDisplay()
def setGabaGbar( val ):
global gabaGbar
gabaGbar = val
updateDisplay()
def setK_A_Gbar( val ):
global K_A_Gbar
K_A_Gbar = val
updateDisplay()
def setGabaOnset( val ):
global gabaOnset
gabaOnset = val/1000.0
updateDisplay()
def setRM( val ):
global RM
RM = val
updateDisplay()
def setCM( val ):
global CM
CM = val
updateDisplay()
def makeModel():
cd = [
['glu', 'soma', 'Gbar', str(gluGbar)],
['GABA', 'soma', 'Gbar', str(gabaGbar)],
['K_A', 'soma', 'Gbar', str(K_A_Gbar)]
]
cd.extend( spikingDistrib )
rdes = rd.rdesigneur(
elecPlotDt = elecPlotDt,
stealCellFromLibrary = True,
verbose = False,
chanProto = [
['make_glu()', 'glu'],['make_GABA()', 'GABA'],
['make_K_A()','K_A'],
['make_Na()', 'Na'],['make_K_DR()', 'K_DR'],
],
cellProto = [['somaProto', 'cellBase', dia, dia]],
passiveDistrib = [[ '#', 'RM', str(RM), 'CM', str(CM), 'RA', str(RA) ]],
chanDistrib = cd,
stimList = [
['soma', '1','glu', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( inputFreq, gluOnset, gluOnset + inputDuration) ],
['soma', '1','GABA', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( inputFreq, gabaOnset, gabaOnset + inputDuration) ],
],
plotList = [['soma', '1','.', 'Vm']],
)
moose.element( '/library/GABA' ).Ek = -0.07
rdes.buildModel()
def main():
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
makeDisplay()
quit()
class stimToggle():
def __init__( self, toggle, ax ):
self.duration = 1
self.toggle = toggle
self.ax = ax
def click( self, event ):
global spikingDistrib
if self.duration < 0.5:
self.duration = 1.0
self.toggle.label.set_text( "Spiking off" )
self.toggle.color = "yellow"
self.toggle.hovercolor = "yellow"
spikingDistrib = []
else:
self.duration = 0.001
self.toggle.label.set_text( "Spiking on" )
self.toggle.color = "orange"
self.toggle.hovercolor = "orange"
spikingDistrib = [['Na', 'soma', 'Gbar', '200' ],['K_DR', 'soma', 'Gbar', '250' ]]
updateDisplay()
def printSomaVm():
print("This is somaVm" )
def updateDisplay():
global frameNum
makeModel()
moose.reinit()
moose.start( runtime )
tabvec = moose.element( '/model/graphs/plot0' ).vector
#print "############## len tabvec = ", len(tabvec)
maxval = max(tabvec)
imaxval = list(tabvec).index( maxval )
maxt = imaxval * elecPlotDt * 1000
pk = (maxval - min( tabvec[:imaxval+1] )) * 1000
ttext.set_text( "Peak=({:.1f}, {:.1f})".format( maxt, pk ) )
tplot.set_ydata( tabvec * 1000 )
if printOutput:
print( "{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}".format( maxval*1000, pk,maxt, gluGbar, gabaGbar, K_A_Gbar, gabaOnset*1000, RM, CM ) )
if makeMovie:
plt.savefig( "{}_{:03d}.png".format(fname, frameNum) )
frameNum += 1
moose.delete( '/model' )
moose.delete( '/library' )
def doQuit( event ):
quit()
def makeDisplay():
global lines
global tplot
global axes
global sliders
global ttext
img = mpimg.imread( 'EI_input.png' )
fig = plt.figure( figsize=(10,12) )
png = fig.add_subplot(311)
imgplot = plt.imshow( img )
plt.axis('off')
ax1 = fig.add_subplot(312)
ttext = plt.text( 0, -35, "Peak=(0,0)", alpha = 0.9 )
plt.ylabel( 'Vm (mV)' )
plt.ylim( -80, -30 )
plt.xlabel( 'time (ms)' )
plt.title( "Membrane potential vs time at soma." )
t = np.arange( 0.0, runtime + elecPlotDt / 2.0, elecPlotDt ) * 1000 #ms
#print "############## len t = ", len(t)
tplot, = ax1.plot( t, np.zeros(len(t)), 'b-' )
ax = fig.add_subplot(313)
plt.axis('off')
axcolor = 'palegreen'
axStim = plt.axes( [0.02,0.005, 0.20,0.03], facecolor='green' )
axReset = plt.axes( [0.25,0.005, 0.30,0.03], facecolor='blue' )
axQuit = plt.axes( [0.60,0.005, 0.30,0.03], facecolor='blue' )
for x in np.arange( 0.05, 0.31, 0.05 ):
axes.append( plt.axes( [0.25, x, 0.65, 0.03], facecolor=axcolor ) )
stim = Button( axStim, 'Spiking off', color = 'yellow' )
stimObj = stimToggle( stim, axStim )
reset = Button( axReset, 'Reset', color = 'cyan' )
q = Button( axQuit, 'Quit', color = 'pink' )
sliders.append( Slider( axes[0], "gluGbar (Mho/m^2)", 0.01, 2, valinit = gluGbar) )
sliders[-1].on_changed( setGluGbar )
sliders.append( Slider( axes[1], "gabaGbar (Mho/m^2)", 0.01, 2, valinit = gabaGbar) )
sliders[-1].on_changed( setGabaGbar )
sliders.append( Slider( axes[2], "K_A_Gbar (Mho/m^2)", 0.01, 50, valinit = K_A_Gbar) )
sliders[-1].on_changed( setK_A_Gbar )
sliders.append( Slider( axes[3], "GABA Onset time (ms)", 10, 50, valinit = gabaOnset * 1000) )
sliders[-1].on_changed( setGabaOnset )
sliders.append( Slider( axes[4], "RM (Ohm.m^2)", 0.1, 10, valinit = RM))
sliders[-1].on_changed( setRM )
sliders.append( Slider( axes[5], "CM (Farads/m^2)", 0.001, 0.1, valinit = CM, valfmt='%0.3f'))
sliders[-1].on_changed( setCM )
def resetParms( event ):
for i in sliders:
i.reset()
stim.on_clicked( stimObj.click )
reset.on_clicked( resetParms )
q.on_clicked( doQuit )
if printOutput:
print( "maxval\tpk\tmaxt\tgluG\tgabaG\tK_A_G\tgabaon\tRM\tCM" )
updateDisplay()
plt.show()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-2.0 |
saullocastro/pyNastran | pyNastran/gui/colormaps/__init__.py | 1 | 50960 | #from inferno import cm_data as inferno_cm
#from magma import cm_data as magma_cm
#from plasma import cm_data as plasma_cm
#from viridis import cm_data as viridis_cm
# New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
__all__ = ['magma', 'inferno', 'plasma', 'viridis']
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
colormap_dict = {
'magma' : _magma_data,
'inferno' : _inferno_data,
'plasma' : _plasma_data,
'viridis' : _viridis_data,
}
colormap_keys = ['jet', 'magma', 'inferno', 'plasma', 'viridis']
if __name__ == '__main__':
from matplotlib.colors import ListedColormap
cmaps = {}
for (name, data) in (('magma', _magma_data),
('inferno', _inferno_data),
('plasma', _plasma_data),
('viridis', _viridis_data)):
cmaps[name] = ListedColormap(data, name=name)
magma = cmaps['magma']
inferno = cmaps['inferno']
plasma = cmaps['plasma']
viridis = cmaps['viridis']
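# Illustrative sketch (not part of the original file): the ListedColormap objects built
# above can be handed straight to matplotlib, e.g. to preview one of them as a gradient.
# 'viridis' below refers to the variable assigned a few lines up.
#   import numpy as np; import matplotlib.pyplot as plt
#   plt.imshow(np.linspace(0, 1, 256).reshape(1, -1), aspect='auto', cmap=viridis)
#   plt.axis('off'); plt.show()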
| lgpl-3.0 |
rexshihaoren/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
dhruv13J/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 88 | 2828 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
idf/FaceReader | facerec_py/apps/scripts/lpq_experiment.py | 1 | 12120 | #!/usr/bin/python
#
# coding: utf-8
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Philipp Wagner <bytefish[at]gmx[dot]de>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from scipy import ndimage
import os
import sys
sys.path.append("../..")
# try to import the PIL Image
try:
from PIL import Image
except ImportError:
import Image
import matplotlib.pyplot as plt
import textwrap
import logging
from facerec.feature import SpatialHistogram
from facerec.distance import ChiSquareDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.lbp import LPQ, ExtendedLBP
from facerec.validation import SimpleValidation, precision
from facerec.util import shuffle_array
EXPERIMENT_NAME = "LocalPhaseQuantizationExperiment"
# ITER_MAX is the number of experimental runs, as described in the
# original paper. For testing purposes, it was set to 1, but it
# should be set to a higher value to get at least a little confidence
# in the results.
ITER_MAX = 1
class FileNameFilter:
"""
Base class used for filtering files.
"""
def __init__(self, name):
self._name = name
def __call__(self, filename):
return True
def __repr__(self):
return "FileNameFilter (name=%s)" % (self._name)
class YaleBaseFilter(FileNameFilter):
"""
This Filter filters files, based on their filetype ending (.pgm) and
their azimuth and elevation. The higher the angle, the more shadows in
the face. This is useful for experiments with illumination and
preprocessing.
"""
def __init__(self, min_azimuth, max_azimuth, min_elevation, max_elevation):
FileNameFilter.__init__(self, "Filter YaleFDB Subset1")
self._min_azimuth = min_azimuth
self._max_azimuth = max_azimuth
self._min_elevation = min_elevation
self._max_elevation = max_elevation
def __call__(self, filename):
# We only want the PGM files:
filetype = filename[-4:]
if filetype != ".pgm":
return False
# There are "Ambient" PGM files, ignore them:
if "Ambient" in filename:
return False
azimuth = abs(int(filename[12:16]))
elevation = abs(int(filename[17:20]))
# Now filter based on angles:
if azimuth < self._min_azimuth or azimuth > self._max_azimuth:
return False
if elevation < self._min_elevation or elevation > self._max_elevation:
return False
return True
def __repr__(self):
return "Yale FDB Filter (min_azimuth=%s, max_azimuth=%s, min_elevation=%s, max_elevation=%s)" % (min_azimuth, max_azimuth, min_elevation, max_elevation)
def read_images(path, fileNameFilter=FileNameFilter("None"), sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
sz: A tuple (width, height); if given, every image is resized to this size.
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
if fileNameFilter(filename):
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
im = im.resize(sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y]
def apply_gaussian(X, sigma):
"""A simple function to apply a Gaussian Blur on each image in X.
Args:
X: A list of images.
sigma: sigma to apply
Returns:
Y: The processed images
"""
return np.array([ndimage.gaussian_filter(x, sigma) for x in X])
def results_to_list(validation_results):
return [precision(result.true_positives,result.false_positives) for result in validation_results]
def partition_data(X, y):
"""
Shuffles the input data and splits it into a new set of images. This resembles the experimental setup
used in the paper on the Local Phase Quantization descriptor in:
"Recognition of Blurred Faces Using Local Phase Quantization", Timo Ahonen, Esa Rahtu, Ville Ojansivu, Janne Heikkila
What it does is to build a subset for each class, so it has 1 image for training and the rest for testing.
The original dataset is shuffled for each call, hence you always get a new partitioning.
"""
Xs,ys = shuffle_array(X,y)
# Maps index to class:
mapping = {}
for i in xrange(len(y)):
yi = ys[i]
try:
mapping[yi].append(i)
except KeyError:
mapping[yi] = [i]
# Get one image for each subject:
Xtrain, ytrain = [], []
Xtest, ytest = [], []
# Finally build partition:
for key, indices in mapping.iteritems():
# Add images:
Xtrain.extend([ Xs[i] for i in indices[:1] ])
ytrain.extend([ ys[i] for i in indices[:1] ])
Xtest.extend([ Xs[i] for i in indices[1:20]])
ytest.extend([ ys[i] for i in indices[1:20]])
# Return shuffled partitions:
return Xtrain, ytrain, Xtest, ytest
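# Illustrative sketch (hypothetical toy data, not part of the original experiment): partition_data
# keeps exactly one image per subject for training and moves the remaining images to the test set.
_toy_X = [np.zeros((2, 2), dtype=np.uint8) for _ in range(4)]
_toy_y = [0, 0, 1, 1]
_Xtr, _ytr, _Xte, _yte = partition_data(_toy_X, _toy_y)
# Here len(_ytr) == 2 (one image per subject) and len(_yte) == 2; with the real dataset the
# test partition is much larger because every subject has many images.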
class ModelWrapper:
def __init__(self, model):
self.model = model
self.result = []
if __name__ == "__main__":
# This is where we write the results to, if an output_dir is given
# in command line:
out_dir = None
# You'll need at least a path to your image data, please see
# the tutorial coming with this source code on how to prepare
# your image data:
if len(sys.argv) < 2:
print "USAGE: lpq_experiment.py </path/to/images>"
sys.exit()
# Define filters for the Dataset:
yale_subset_0_40 = YaleBaseFilter(0, 40, 0, 40)
# Now read in the image data. Apply filters, scale to 128 x 128 pixel:
[X,y] = read_images(sys.argv[1], yale_subset_0_40, sz=(64,64))
# Set up a handler for logging:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# The models we want to evaluate:
model0 = PredictableModel(feature=SpatialHistogram(lbp_operator=ExtendedLBP()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
model1 = PredictableModel(feature=SpatialHistogram(lbp_operator=LPQ()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
# The sigmas we'll apply for each run:
sigmas = [0]
print 'The experiment will be run %s times!' % ITER_MAX
# Initialize experiments (with empty results):
experiments = {}
experiments['lbp_model'] = { 'model': model0, 'results' : {}, 'color' : 'r', 'linestyle' : '--', 'marker' : '*'}
experiments['lpq_model'] = { 'model': model1, 'results' : {}, 'color' : 'b', 'linestyle' : '--', 'marker' : 's'}
# Loop to acquire the results for each experiment:
for sigma in sigmas:
print "Setting sigma=%s" % sigma
for key, value in experiments.iteritems():
print 'Running experiment for model=%s' % key
# Define the validators for the model:
cv0 = SimpleValidation(value['model'])
for iteration in xrange(ITER_MAX):
print "Repeating experiment %s/%s." % (iteration + 1, ITER_MAX)
# Split dataset according to the papers description:
Xtrain, ytrain, Xtest, ytest = partition_data(X,y)
# Apply a gaussian blur on the images:
Xs = apply_gaussian(Xtest, sigma)
# Run each validator with the given data:
experiment_description = "%s (iteration=%s, sigma=%.2f)" % (EXPERIMENT_NAME, iteration, sigma)
cv0.validate(Xtrain, ytrain, Xs, ytest, experiment_description)
# Get overall results:
true_positives = sum([validation_result.true_positives for validation_result in cv0.validation_results])
false_positives = sum([validation_result.false_positives for validation_result in cv0.validation_results])
# Calculate overall precision:
prec = precision(true_positives,false_positives)
# Store the result:
print key
experiments[key]['results'][sigma] = prec
# Make a nice plot of this textual output:
fig = plt.figure()
# Holds the legend items:
plot_legend = []
# Add the Validation results:
for experiment_name, experiment_definition in experiments.iteritems():
print experiment_name, experiment_definition
results = experiment_definition['results']
(xvalues, yvalues) = zip(*[(k,v) for k,v in results.iteritems()])
# Add to the legend:
plot_legend.append(experiment_name)
# Put the results into the plot:
plot_color = experiment_definition['color']
plot_linestyle = experiment_definition['linestyle']
plot_marker = experiment_definition['marker']
plt.plot(sigmas, yvalues, linestyle=plot_linestyle, marker=plot_marker, color=plot_color)
# Put the legend below the plot (TODO):
plt.legend(plot_legend, prop={'size':6}, numpoints=1, loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=True, ncol=1)
# Scale y-axis between 0,1 to see the Precision:
plt.ylim(0,1)
plt.xlim(-0.2, max(sigmas) + 1)
# Finally add the labels:
plt.title(EXPERIMENT_NAME)
plt.ylabel('Precision')
plt.xlabel('Sigma')
fig.subplots_adjust(bottom=0.5)
# Save the figure and we are out of here!
plt.savefig("lpq_experiment.png", bbox_inches='tight',dpi=100)
| mit |
SirJohnFranklin/FieldSolver | ElectricFieldSolver.py | 1 | 30598 | from __future__ import division, print_function
import numpy as np
import numba as nb
import matplotlib.pyplot as plt
import scipy.constants as const
from traits.api import HasTraits, on_trait_change, Instance, Bool, Float, Int, Array, List, Dict
from HelperFunctions import plot_field, timeitNV, calculate_radius
from scipy.sparse.linalg import spsolve, bicgstab, bicg, cg, cgs, gmres, lgmres, minres, qmr, lsqr, lsmr
# from scikits.umfpack import spsolve
from scipy import sparse
from tqdm import tqdm
from pyamg import ruge_stuben_solver
# maybe use pysparse to solve matrices
@timeitNV
@nb.jit(nopython=True, nogil=True)
def solve_gauss_seidel_cylindric(initial_phi, cell_type, rho_i, dr, dz, r, maxit=1e5, rtol=1e-6):
nz, nr = cell_type.shape
g = np.zeros(cell_type.shape)
phi = np.zeros(cell_type.shape)
phib4 = np.zeros(cell_type.shape)
tol = 0.
for it in xrange(int(maxit)):
phib4 = phi
for i in range(1, nz - 1): # loop over cells
for j in range(1, nr - 1):
b = (rho_i[i, j]) / const.epsilon_0
g[i, j] = (b +
(phi[i, j - 1] + phi[i, j + 1]) / dr**2 +
(-phi[i, j - 1] + phi[i, j + 1]) / (2 * dr * r[i,j]) + # -/+ error in phi part???
(phi[i - 1, j] + phi[i + 1, j]) / dz**2) / (2 / dr**2 + 2 / dz**2)
# neumann boundaries around the "World"
g[0, :] = g[1, :] # left
g[-1, :] = g[-2, :] # right
g[:, -1] = g[:, -2] # top
g[:, 0] = g[:, 1] # bottom
# dirichlet nodes
phi = np.where(cell_type > 0, initial_phi, g)
tol = np.nansum(np.abs((phi - phib4)/(phi + phib4)))
if tol < rtol or it == maxit:
return phi, tol, it
return phi, tol, it
@timeitNV
@nb.jit(nopython=True, nogil=True)
def solve_gauss_seidel_cartesian(initial_phi, cell_type, rho_i, dy, dx, maxit=1e5, rtol=1e-6):
nx, ny = cell_type.shape
g = np.zeros(cell_type.shape)
phi = np.zeros(cell_type.shape)
phib4 = np.zeros(cell_type.shape)
tol = 0.
for it in xrange(int(maxit)):
phib4 = phi
for i in range(1, nx - 1): # loop over cells
for j in range(1, ny - 1):
b = (rho_i[i, j]) / const.epsilon_0
g[i, j] = ((phi[i-1,j]+phi[i+1,j])*dy**2 + dx**2 * (phi[i,j-1]+phi[i,j+1] + dy**2 * b)) / (2 * (dx**2 + dy**2))
# neumann boundaries around the "World"
g[0, :] = g[1, :] # left
g[-1, :] = g[-2, :] # right
g[:, -1] = g[:, -2] # top
        g[:, 0] = g[:, 1] # bottom
# dirichlet nodes
phi = np.where(cell_type > 0, initial_phi, g)
tol = np.nansum(np.abs((phi - phib4)/(phi + phib4)))
if tol < rtol or it == maxit:
return phi, tol, it
return phi, tol, it
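# Illustrative sketch: a minimal way the Gauss-Seidel kernel above might be driven
# on a toy cartesian grid. The grid shape, electrode placement, voltage, iteration
# limit and tolerance below are arbitrary assumptions chosen only to show the
# calling convention; they are not values taken from this project.
def _gauss_seidel_cartesian_example():
    shape = (32, 16)              # (rows, columns) of the toy grid
    dy = dx = 1e-3
    cell_type = np.zeros(shape)
    initial_phi = np.zeros(shape)
    cell_type[:, 0] = 1           # first column: fixed (Dirichlet) electrode
    initial_phi[:, 0] = 100.      # held at 100 V
    rho_i = np.zeros(shape)       # no space charge
    phi, tol, it = solve_gauss_seidel_cartesian(initial_phi, cell_type, rho_i,
                                                dy, dx, 1e4, 1e-6)
    return phi, tol, it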
class PoissonSolverBase(HasTraits):
verbose = Bool(True)
node_volume = Array
numbers_to_voltage_dict = Dict
    numbers_to_current_dict = Dict
rho_i = Array
electric_cell_type = Array
electric_sparse_mat = Instance(sparse.csr_matrix)
initial_electric_potential = Array
solved_electric_potential = Array
magnetic_cell_type = Array
magnetic_sparse_mat = Instance(sparse.csr_matrix)
initial_currents = Array
solved_magnetic_potential = Array
electric_field = Array
magnetic_field = Array
def get_electric_cell_type(self):
return self.electric_cell_type
def get_magnetic_cell_type(self):
return self.magnetic_cell_type
def set_electric_cell_type(self, cell_type_array, numbers_to_voltage_dict):
"""
        Creates the initial potential array from cell types and the voltages assigned to these cell types
        :param cell_type_array: array of int
:param numbers_to_voltage_dict: dictionary of number in cell type array to initial voltage
:return: initial_electric_potential (not solved)
"""
if self.verbose:
print(self.__class__.__name__, ": setting electric_cell_type array | electric_cell_type.shape = ", self.electric_cell_type.shape)
self.electric_cell_type = cell_type_array
self.numbers_to_voltage_dict = numbers_to_voltage_dict
for key in numbers_to_voltage_dict:
num = int(key)
self.initial_electric_potential[np.where(self.electric_cell_type == num)] = numbers_to_voltage_dict[key]
return self.initial_electric_potential
def set_magnetic_cell_type(self, cell_type_array, numbers_to_currents_dict):
"""
        Creates the initial current density array from cell types and the currents assigned to these cell types
        :param cell_type_array: array of int
        :param numbers_to_currents_dict: dictionary of number in cell type array to initial current density
        :return: initial_currents (not solved)
"""
if self.verbose:
print(self.__class__.__name__, ": setting magnetic_cell_type array | magnetic_cell_type.shape = ", self.electric_cell_type.shape)
self.magnetic_cell_type = cell_type_array
self.numbers_to_current_dict = numbers_to_currents_dict
for key in numbers_to_currents_dict:
num = int(key)
self.initial_currents[np.where(self.magnetic_cell_type == num)] = numbers_to_currents_dict[key]
return self.initial_currents
def _get_borders_array(self):
        # this is a helper function for create_Ab_matrix to know where the boundaries lie in the flattened array
borders = np.zeros(self.electric_cell_type.shape)
        borders[0, :] = 1 # bottom
borders[-1, :] = 2 # top
borders[:, 0] = 3 # left
borders[:, -1] = 4 # right
ncells = self._get_mesh_size()
return borders.reshape(ncells[0] * ncells[1])
def _add_for_COO_format(self, i, j, v):
self._ii.append(i)
self._jj.append(j)
self._va.append(v)
def _create_electric_rhs(self):
raise NotImplementedError("Has to be implemented by inheriting classes.")
def _create_magnetic_rhs(self):
raise NotImplementedError("Has to be implemented by inheriting classes.")
@timeitNV
def _pyamg_benchmark(self, matrix, rhs, x0=None, tol=1e-8):
ml = ruge_stuben_solver(matrix)
print(ml) # print hierarchy information
x = ml.solve(rhs, x0=x0, tol=tol) # solve Ax=b to a tolerance of 1e-8
print(self.__class__.__name__,": Residual norm after direct solving is", np.linalg.norm(rhs - matrix * x)) # compute norm of residual vector
return x
def calculate_potential_exact(self, method='iterative'):
if not np.all(self.initial_currents == 0.):
self.calculate_electric_magnetic_potential_exact(kind='magnetic', method=method)
if not np.all(self.initial_electric_potential == 0.):
self.calculate_electric_magnetic_potential_exact(kind='electric', method=method)
def calculate_electric_magnetic_potential_exact(self, kind, method='iterative'):
# TODO: without x0, pyamg finds no solution. Could be used for iterative solving (faster, because multilevel_solver),
# https://github.com/pyamg/pyamg
"""
        If no Ab matrix exists yet, this function builds it and solves the system directly (scipy spsolve).
        If an Ab matrix already exists, an iterative solver is used by default, with the previously solved
        potential as the initial guess, to speed up computation time.
:param method: 'iterative' or 'direct'. Only important if sparse matrix has been calculated. Normally iterative
is faster. If strong fluctuations in potential occur, e.g. through charged ions/electrons,
direct solving is faster.
        :return: the solved potential for the requested kind
"""
if kind == 'electric':
self.electric_rhs = self._create_electric_rhs()
rhs = self.electric_rhs
sparse_matrix = self.electric_sparse_mat
x0 = self.solved_electric_potential.reshape(self._get_mesh_size())
elif kind == 'magnetic':
self.magnetic_rhs = self._create_magnetic_rhs()
rhs = self.magnetic_rhs
sparse_matrix = self.magnetic_sparse_mat
x0 = self.solved_magnetic_potential.reshape(self._get_mesh_size())
else:
raise NotImplementedError(self.__class__.__name__,": kind must be 'both' (=None) or 'electric' or 'magnetic'")
if sparse_matrix is None:
sparse_matrix = self.create_Ab_matrix(kind=kind) # if cell types do not change, this is constant!
if self.verbose:
print(self.__class__.__name__,": Solving ", sparse_matrix.shape, " sparse matrix system...")
x = self._spsolve_benchmark(sparse_matrix, rhs)
# x = self._pyamg_benchmark(sparse_matrix, rhs)
if self.verbose:
print(self.__class__.__name__, ": Residual norm after direct solving is", np.linalg.norm(rhs - sparse_matrix * x))
else:
if method == 'iterative':
# x = self._bicgstab_benchmark(sparse_matrix, rhs, x0=x0)
x = self._pyamg_benchmark(sparse_matrix, rhs, x0=x0)
elif method == 'direct':
x = self._spsolve_benchmark(sparse_matrix, rhs)
else:
# print(self.__class__.__name__,": Methods for solving can only be iterative and direct!")
raise NotImplementedError(self.__class__.__name__,": Methods for solving can only be iterative and direct!")
if self.verbose:
print(self.__class__.__name__, ": Residual norm after ", method, " solving is", np.linalg.norm(rhs - sparse_matrix * x))
if kind == 'electric':
self.solved_electric_potential = x.reshape(self._get_mesh_size()) # correct
self.electric_sparse_mat = sparse_matrix
elif kind == 'magnetic':
self.solved_magnetic_potential = x.reshape(self._get_mesh_size())
self.magnetic_sparse_mat = sparse_matrix
else:
print("Never happens.")
        if kind == 'electric':
            return self.solved_electric_potential
        return self.solved_magnetic_potential
@timeitNV
def _spsolve_benchmark(self, sparse_matrix, rhs):
return spsolve(sparse_matrix, rhs)
def _bicgstab_benchmark(self, sparse_matrix, rhs, x0=None):
x = bicgstab(sparse_matrix, rhs, x0, tol=1e-5)
return x[0]
def _get_mesh_size(self):
if hasattr(self, 'nr') and hasattr(self, 'nz'):
return self.nr, self.nz
elif hasattr(self, 'ny') and hasattr(self, 'nx'):
return self.ny, self.nx
def _electric_field_default(self):
return np.zeros(self._get_mesh_size())
def _solved_electric_potential_default(self):
return np.zeros(self._get_mesh_size())
def _initial_currents_default(self):
return np.zeros(self._get_mesh_size())
def _initial_electric_potential_default(self):
return np.zeros(self._get_mesh_size())
def _initial_magnetic_potential_default(self):
return np.zeros(self._get_mesh_size())
def _rho_i_default(self):
return np.zeros(self._get_mesh_size())
def _electric_cell_type_default(self):
return np.zeros(self._get_mesh_size())
def _magnetic_cell_type_default(self):
return np.zeros(self._get_mesh_size())
def _solved_magnetic_potential_default(self):
return np.zeros(self._get_mesh_size())
def _magnetic_field_default(self):
return np.zeros(self._get_mesh_size())
class CartesianPoissonSolver(PoissonSolverBase):
"""
- All arrays go like [y,x]
"""
nx = Int
ny = Int
dx = Float
dy = Float
xvals = Array
yvals = Array
electric_field_x = Array
electric_field_y = Array
def __init__(self, nx, dx, ny, dy):
super(CartesianPoissonSolver, self).__init__()
print(self.__class__.__name__, ": Created with nx = ", nx, " | dx = ", dx, " | ny = ", ny, " | dy = ", dy)
self.nx = int(nx)
self.ny = int(ny)
self.dx = dx
self.dy = dy
def plot_all_fields(self, figsize=(12,4)):
savedict = {}
savedict['x-values'] = self.xvals
savedict['y-values'] = self.yvals
if not np.all(self.solved_electric_potential == 0.):
plot_field(self.xvals, self.yvals, self.solved_electric_potential, self.electric_cell_type, 'potential $\phi$ [V]', figsize=figsize)
plot_field(self.xvals, self.yvals, self.initial_electric_potential, self.electric_cell_type, 'initial potential $\phi$ [V]', figsize=figsize)
plot_field(self.xvals, self.yvals, self.electric_field_y, self.electric_cell_type, 'electric field $E_y$ [V/m]', figsize=figsize)
plot_field(self.xvals, self.yvals, self.electric_field_x, self.electric_cell_type, 'electric field $E_x$ [V/m]', figsize=figsize)
            plot_field(self.xvals, self.yvals, self.electric_field, self.electric_cell_type, 'electric field combined $E$ [V/m]', lognorm=False, figsize=figsize)
savedict['solved_electric_potential'] = self.solved_electric_potential
savedict['initial_electric_potential'] = self.initial_electric_potential
savedict['electric_field_y'] = self.electric_field_y
savedict['electric_field_x'] = self.electric_field_x
savedict['electric_field_combined'] = self.electric_field
if not np.all(self.solved_magnetic_potential == 0.):
plot_field(self.xvals, self.yvals, self.solved_magnetic_potential, self.magnetic_cell_type, 'magnetic potential A [Wb/m]', figsize=figsize)
plot_field(self.xvals, self.yvals, self.initial_currents, self.electric_cell_type, 'initial currents J $[A/m^2]$', figsize=figsize)
plot_field(self.xvals, self.yvals, self.magnetic_field_y, self.magnetic_cell_type, 'magnetic field $B_y$ [T]', figsize=figsize)
plot_field(self.xvals, self.yvals, self.magnetic_field_x, self.magnetic_cell_type, 'magnetic field $B_x$ [T]', figsize=figsize)
            plot_field(self.xvals, self.yvals, self.magnetic_field, self.magnetic_cell_type, 'magnetic field combined $B$ [T]', lognorm=False)
savedict['solved_magnetic_potential'] = self.solved_magnetic_potential
savedict['initial_currents'] = self.initial_currents
savedict['magnetic_field_y'] = self.magnetic_field_y
savedict['magnetic_field_x'] = self.magnetic_field_x
savedict['magnetic_field'] = self.magnetic_field
for fig in plt.get_fignums():
plt.figure(fig)
plt.xlabel('x direction [m]')
plt.ylabel('y direction [m]')
return savedict
def calculate_potential_gauss_seidel(self):
solved_potential, tol, iters = solve_gauss_seidel_cartesian(np.transpose(self.initial_electric_potential),
np.transpose(self.electric_cell_type),
np.transpose(self.rho_i), self.dy,
self.dx, maxit=1e6)
self.solved_electric_potential = np.transpose(solved_potential)
print(self.__class__.__name__, ": solve_gauss_seidel_cartesian() reached tolerance of ", tol, " after ", iters, " iterations.")
return self.solved_electric_potential
@on_trait_change('solved_electric_potential')
def calculate_electric_fields(self):
if self.verbose:
print(self.__class__.__name__,": Calculating electric fields")
self.electric_field_x = -np.gradient(self.solved_electric_potential, self.dx, axis=1, edge_order=2)
self.electric_field_y = -np.gradient(self.solved_electric_potential, self.dy, axis=0, edge_order=2)
self.electric_field = np.sqrt(self.electric_field_x ** 2 + self.electric_field_y ** 2)
return self.electric_field, self.electric_field_y, self.electric_field_x
@on_trait_change('solved_magnetic_potential')
def calculate_magnetic_fields(self):
if self.verbose:
print(self.__class__.__name__,": Calculating magnetic fields")
self.magnetic_field_x = -np.gradient(self.solved_magnetic_potential, self.dy, axis=0, edge_order=2)
self.magnetic_field_y = -np.gradient(self.solved_magnetic_potential, self.dx, axis=1, edge_order=2)
self.magnetic_field = np.sqrt(self.magnetic_field_x**2 + self.magnetic_field_y**2)
return self.magnetic_field, self.magnetic_field_y, self.magnetic_field_x
def _create_electric_rhs(self):
b = self.initial_electric_potential - self.dx * self.dy * self.rho_i / const.epsilon_0 # TODO: check if dx * dy or dx**2
b = b.reshape(self.nx * self.ny)
return b
def _create_magnetic_rhs(self):
b = -self.initial_currents * const.mu_0
b = b.reshape(self.nx * self.ny)
return b
@timeitNV
def create_Ab_matrix(self, kind):
"""
See https://en.wikipedia.org/wiki/Five-point_stencil
See http://www.sciencedirect.com/science/article/pii/0010465571900476 for algorithm description, e.g. alpha,
betaj and gammaj
"""
if kind == 'electric':
cell_type_flat = self.electric_cell_type.reshape((self.nx * self.ny))
rb = -1.
elif kind == 'magnetic':
cell_type_flat = self.magnetic_cell_type.reshape((self.ny * self.nx))
rb = 0.
else:
print("This never happens.")
exit()
borders = self._get_borders_array()
max_i = self.nx * self.ny
max_j = self.nx * self.ny
self._ii = []
self._jj = []
self._va = []
alpha = -2 * (1 + (self.dx / self.dy) ** 2)
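        # Five-point stencil bookkeeping: each interior matrix row couples a node to
        # its two same-row neighbours (i +/- 1, coefficient 1), its two vertical
        # neighbours (i +/- nx, coefficients betaj/gammaj = (dx/dy)**2) and to itself
        # through the diagonal coefficient alpha defined above.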
print(self.__class__.__name__, ": Creating sparse matrix for field solving.")
        for i in tqdm(xrange(max_i)):  # Neumann boundary conditions first; Dirichlet values are imposed via electric_cell_type (e.g. set borders to phi=0)
j = np.floor(i / self.nx) + 1
if cell_type_flat[i] > 0 and kind == 'electric': # fixed to phi
self._add_for_COO_format(i, i, 1.)
            elif borders[i] == 1: # bottom
self._add_for_COO_format(i, i + self.nx, rb)
self._add_for_COO_format(i, i, 1.)
elif borders[i] == 2: # top
self._add_for_COO_format(i, i - self.nx, rb)
self._add_for_COO_format(i, i, 1.)
elif borders[i] == 3: # left
self._add_for_COO_format(i, i + 1, rb)
self._add_for_COO_format(i, i, 1.)
elif borders[i] == 4: # right
self._add_for_COO_format(i, i - 1, rb)
self._add_for_COO_format(i, i, 1.)
else:
betaj = (self.dx / self.dy) ** 2
gammaj = (self.dx / self.dy) ** 2
if i - 1 >= 0:
self._add_for_COO_format(i, i - 1, 1.)
if i + 1 < max_j:
self._add_for_COO_format(i, i + 1, 1.)
                if (i - self.nx) >= 0: # bottom
self._add_for_COO_format(i, i - self.nx, gammaj)
if (i + self.nx) <= max_j: # top
self._add_for_COO_format(i, i + self.nx, betaj)
self._add_for_COO_format(i, i, alpha)
if kind == 'electric':
self.electric_sparse_mat = sparse.coo_matrix((self._va, (self._ii, self._jj))).tocsr() #
return self.electric_sparse_mat
elif kind == 'magnetic':
self.magnetic_sparse_mat = sparse.coo_matrix((self._va, (self._ii, self._jj))).tocsr() #
return self.magnetic_sparse_mat
else:
print("This never happens.")
exit(-1)
def _electric_field_x_default(self):
return np.zeros(self._get_mesh_size())
def _electric_field_y_default(self):
return np.zeros(self._get_mesh_size())
def _yvals_default(self):
return np.linspace(0, self.ny, self.ny, endpoint=False) * self.dy
def _xvals_default(self):
return np.linspace(0, self.nx, self.nx, endpoint=False) * self.dx
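# Illustrative sketch (an assumption, not taken from this code base): one plausible
# driving sequence for the cartesian solver defined above. The class and method
# names are the real ones from this file, but the grid dimensions, cell-type
# numbering and voltages are made up purely to show the intended call order.
def _cartesian_solver_usage_sketch():
    solver = CartesianPoissonSolver(nx=200, dx=1e-3, ny=100, dy=1e-3)
    cell_type = np.zeros(solver._get_mesh_size())
    cell_type[:, 0] = 1    # left column: electrode held at 1 kV
    cell_type[:, -1] = 2   # right column: grounded electrode
    solver.set_electric_cell_type(cell_type, {'1': 1000., '2': 0.})
    solver.calculate_potential_exact(method='direct')
    return solver.solved_electric_potential, solver.electric_field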
class CylindricalPoissonSolver(PoissonSolverBase):
"""
- All arrays go like [r,z]
"""
nz = Int
nr = Int
dz = Float
dr = Float
zvals = Array
rvals = Array
electric_field_z = Array
electric_field_r = Array
magnetic_field_z = Array
magnetic_field_r = Array
def __init__(self, nz, dz, nr, dr):
super(CylindricalPoissonSolver, self).__init__()
print(self.__class__.__name__, ": Created with nx = ", nz, " | dx = ", dz, " | ny = ", nr, " | dy = ", dr)
self.nz = int(nz)
self.nr = int(nr)
self.dz = dz
self.dr = dr
def plot_all_fields(self):
savedict = {}
savedict['r-values'] = self.rvals
savedict['z-values'] = self.zvals
if not np.all(self.solved_electric_potential == 0.):
print(self.__class__.__name__, ": Plotting electric fields")
plot_field(self.zvals, self.rvals, self.solved_electric_potential, self.electric_cell_type, 'potential $\phi$ [V]', mask=True)
plot_field(self.zvals, self.rvals, self.initial_electric_potential, self.electric_cell_type, 'initial potential $\phi$ [V]')
plot_field(self.zvals, self.rvals, self.electric_field_r, self.electric_cell_type, 'electric field $E_r$ [V/m]', mask=True)
plot_field(self.zvals, self.rvals, self.electric_field_z, self.electric_cell_type, 'electric field $E_z$ [V/m]', mask=True)
            plot_field(self.zvals, self.rvals, self.electric_field, self.electric_cell_type, 'electric field combined $E$ [V/m]', lognorm=False, mask=True)
savedict['solved_electric_potential'] = self.solved_electric_potential
savedict['initial_electric_potential'] = self.initial_electric_potential
savedict['electric_field_r'] = self.electric_field_r
savedict['electric_field_z'] = self.electric_field_z
savedict['electric_field_combined'] = self.electric_field
if not np.all(self.solved_magnetic_potential == 0.):
print(self.__class__.__name__, ": Plotting magnetic fields")
plot_field(self.zvals, self.rvals, self.solved_magnetic_potential, self.magnetic_cell_type, 'magnetic potential $A$ [Wb/m]')
            plot_field(self.zvals, self.rvals, self.initial_currents, self.electric_cell_type, 'initial currents J $[A/m^2]$')
plot_field(self.zvals, self.rvals, self.magnetic_field_r, self.magnetic_cell_type, 'magnetic field $B_r$ [T]')
plot_field(self.zvals, self.rvals, self.magnetic_field_z, self.magnetic_cell_type, 'magnetic field $B_z$ [T]')
            plot_field(self.zvals, self.rvals, self.magnetic_field, self.magnetic_cell_type, 'magnetic field combined $B$ [T]', lognorm=False)
savedict['solved_magnetic_potential'] = self.solved_magnetic_potential
savedict['initial_currents'] = self.initial_currents
            savedict['magnetic_field_r'] = self.magnetic_field_r
            savedict['magnetic_field_z'] = self.magnetic_field_z
savedict['magnetic_field'] = self.magnetic_field
for fig in plt.get_fignums():
plt.figure(fig)
plt.xlabel('z direction [m]')
plt.ylabel('r direction [m]')
return savedict
def get_radius_array(self):
r = np.zeros_like(np.transpose(self.electric_cell_type)) # get radii in not standard order for gauss-seidel
for i in range(self.nz):
for j in range(self.nr):
r[i][j] = calculate_radius(j+1, self.dr)
return r
def calculate_potential_gauss_seidel(self):
r = self.get_radius_array()
solved_potential, tol, iters = solve_gauss_seidel_cylindric(np.transpose(self.initial_electric_potential),
np.transpose(self.electric_cell_type),
np.transpose(self.rho_i), self.dr,
self.dz, r, maxit=1e6)
self.solved_electric_potential = np.transpose(solved_potential)
print(self.__class__.__name__, ": solve_gauss_seidel_cylindric() reached tolerance of ", tol, " after ", iters, " iterations.")
return self.solved_electric_potential
@on_trait_change('solved_electric_potential')
def calculate_electric_fields(self):
if self.verbose:
print(self.__class__.__name__,": Calculating electric fields")
self.electric_field_z = -np.gradient(self.solved_electric_potential, self.dz, axis=1, edge_order=2)
self.electric_field_r = -np.gradient(self.solved_electric_potential, self.dr, axis=0, edge_order=2)
self.electric_field = np.sqrt(self.electric_field_z**2 + self.electric_field_r**2)
return self.electric_field, self.electric_field_r, self.electric_field_z
@on_trait_change('solved_magnetic_potential')
def calculate_magnetic_fields(self):
# See: link.springer.com/content/pdf/10.1007%2F3-540-28812-0_2.pdf - p. 27ff
if self.verbose:
print(self.__class__.__name__,": Calculating magnetic fields")
        r = np.transpose(self.get_radius_array())
        self.magnetic_field_z = np.gradient(self.solved_magnetic_potential, self.dr, axis=0, edge_order=2) / (2 * np.pi * r)
        self.magnetic_field_r = -np.gradient(self.solved_magnetic_potential, self.dz, axis=1, edge_order=2) / (2 * np.pi * r)
self.magnetic_field = np.sqrt(self.magnetic_field_z**2 + self.magnetic_field_r**2)
return self.magnetic_field, self.magnetic_field_r, self.magnetic_field_z
def _create_electric_rhs(self):
        # creates the right hand side of the Poisson equation
        # rho_i is the density of ions in a cell (see Poisson equation)
# See http://www.sciencedirect.com/science/article/pii/0010465571900476 for dz**2 factor (p in paper)
b = self.initial_electric_potential - self.dz ** 2 * self.rho_i / const.epsilon_0
b = b.reshape(self.nz * self.nr)
return b
def _create_magnetic_rhs(self):
        # creates the right hand side of the Poisson equation
# See: link.springer.com/content/pdf/10.1007%2F3-540-28812-0_2.pdf - p. 27ff
# self.get_radius_array()*self.dr is a j array
b = - const.mu_0 * self.initial_currents * self.dz ** 2 * (2 * np.pi * np.transpose(self.get_radius_array()))
b = b.reshape(self.nz * self.nr)
return b
@timeitNV
def create_Ab_matrix(self, kind):
"""
See https://en.wikipedia.org/wiki/Five-point_stencil
See http://www.sciencedirect.com/science/article/pii/0010465571900476 for algorithm description, e.g. alpha,
betaj and gammaj
:param kind: 'magnetic or electric'
:return: Nothing
"""
if kind == 'electric':
cell_type_flat = self.electric_cell_type.reshape((self.nz * self.nr))
rb = -1. # neumann boundary
intermed_factor = 1. # See: link.springer.com/content/pdf/10.1007%2F3-540-28812-0_2.pdf - p. 27ff
elif kind == 'magnetic':
cell_type_flat = self.magnetic_cell_type.reshape((self.nz * self.nr))
            rb = 0. # Dirichlet boundary
intermed_factor = -1. # See: link.springer.com/content/pdf/10.1007%2F3-540-28812-0_2.pdf - p. 27ff
else:
print("This never happens.")
exit()
print(self.__class__.__name__, ": Creating sparse matrix for", kind, "field solving.")
borders = self._get_borders_array()
max_i = self.nz * self.nr
max_j = self.nz * self.nr
self._ii = []
self._jj = []
self._va = []
alpha = -2 * (1 + (self.dz/self.dr)**2)
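        # Cylindrical five-point stencil: axial neighbours (i +/- 1) enter with
        # coefficient 1, radial neighbours with betaj/gammaj, which carry the extra
        # +/- 1/(2j) term coming from the (1/r) d/dr part of the Laplacian in
        # cylindrical coordinates; alpha above is the diagonal entry.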
        for i in tqdm(xrange(max_i)):  # Neumann boundary conditions first; Dirichlet values are imposed via electric_cell_type (e.g. set borders to phi=0)
j = np.floor(i / self.nz)+1
if cell_type_flat[i] > 0 and kind == 'electric': # fixed to phi
self._add_for_COO_format(i, i, 1.)
            elif borders[i] == 1: # bottom
self._add_for_COO_format(i, i + self.nz, rb) # disabled = 0: field gets pushed into domain., -1 for electric potential
self._add_for_COO_format(i, i, 1.)
elif borders[i] == 2: # top
self._add_for_COO_format(i, i - self.nz, rb)
self._add_for_COO_format(i, i, 1.)
elif borders[i] == 3: # left
self._add_for_COO_format(i, i + 1, rb)
self._add_for_COO_format(i, i, 1.)
elif borders[i] == 4: # right
self._add_for_COO_format(i, i - 1, rb)
self._add_for_COO_format(i, i, 1.)
else:
betaj = (self.dz / self.dr) ** 2 * (1 + 1 / (2 * j))
gammaj = (self.dz / self.dr) ** 2 * (1 - 1 / (2 * j))
if i - 1 >= 0:
self._add_for_COO_format(i, i - 1, 1.)
if i + 1 <= max_j:
self._add_for_COO_format(i, i + 1, 1.)
                if (i - self.nz) >= 0: # bottom
self._add_for_COO_format(i, i + intermed_factor * self.nz, betaj)
if (i + self.nz) <= max_j: # top
self._add_for_COO_format(i, i - intermed_factor * self.nz, gammaj)
self._add_for_COO_format(i, i, alpha)
if kind == 'electric':
self.electric_sparse_mat = sparse.coo_matrix((self._va, (self._ii, self._jj))).tocsr() #
return self.electric_sparse_mat
elif kind == 'magnetic':
self.magnetic_sparse_mat = sparse.coo_matrix((self._va, (self._ii, self._jj))).tocsr() #
return self.magnetic_sparse_mat
else:
print("This never happens.")
exit()
def _electric_field_z_default(self):
return np.zeros(self._get_mesh_size())
def _electric_field_r_default(self):
return np.zeros(self._get_mesh_size())
def _rvals_default(self):
return np.linspace(0, self.nr, self.nr, endpoint=False) * self.dr
def _zvals_default(self):
return np.linspace(0, self.nz, self.nz, endpoint=False) * self.dz | gpl-3.0 |
B3AU/waveTree | sklearn/feature_selection/tests/test_from_model.py | 14 | 1541 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.1, n_iter=10, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_true(np.mean(pred == iris.target) >= 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 24 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Licence: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
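        # Solve coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0 for x1:
        # this gives the one-vs-rest decision boundary for class c.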
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
RayMick/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
tjhei/burnman-original | misc/paper_averaging.py | 2 | 6102 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
This script reproduces Cottaar, Heister, Rose, Unterborn (2014) Figure 2.
This example shows the effect of different averaging schemes. Currently four
averaging schemes are available:
1. Voight-Reuss-Hill
2. Voight averaging
3. Reuss averaging
4. Hashin-Shtrikman averaging
See Watt et al., 1976, Reviews of Geophysics and Space Physics, for explanations
of each averaging scheme.
requires:
- geotherms
- compute seismic velocities
teaches:
- averaging
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
from burnman import minerals
import colors
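# Quick reference (a sketch, not used by the script below): for a two-phase
# composite with volume fractions f1 and f2 = 1 - f1, the Voigt and Reuss
# estimates of a modulus are the arithmetic and harmonic volume-fraction
# averages, and Voigt-Reuss-Hill is their mean. The moduli here are placeholder
# values in Pa, chosen only to illustrate the formulas.
def _two_phase_vrh_sketch(K1=250.e9, K2=160.e9, f1=0.6):
    f2 = 1.0 - f1
    K_voigt = f1 * K1 + f2 * K2            # arithmetic average (upper bound)
    K_reuss = 1.0 / (f1 / K1 + f2 / K2)    # harmonic average (lower bound)
    K_vrh = 0.5 * (K_voigt + K_reuss)      # Hill average
    return K_voigt, K_reuss, K_vrh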
if __name__ == "__main__":
figsize=(6,5)
prop={'size':12}
#plt.rc('text', usetex=True)
    plt.rc('font', family='sans-serif')
figure=plt.figure(dpi=100,figsize=figsize)
""" choose 'slb2' (finite-strain 2nd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'slb3 (finite-strain 3rd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus,
matas et al. 2007)
or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus,
matas et al. 2007)
or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))
or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))"""
amount_perovskite = 0.6
method = 'slb3'
rock = burnman.composite( [ (minerals.SLB_2011.mg_perovskite(), amount_perovskite),
(minerals.SLB_2011.wuestite(), 1.0-amount_perovskite) ] )
rock.set_method(method)
perovskitite = burnman.composite( [ (minerals.SLB_2011.mg_perovskite(), 1.0), ] )
perovskitite.set_method(method)
periclasite = burnman.composite( [ (minerals.SLB_2011.wuestite(), 1.0), ] )
periclasite.set_method(method)
#seismic model for comparison:
# pick from .prem() .slow() .fast() (see burnman/seismic.py)
seismic_model = burnman.seismic.prem()
#set on how many depth slices the computations should be done
number_of_points = 20
# we will do our computation and comparison at the following depth values:
depths = np.linspace(700e3, 2800e3, number_of_points)
#alternatively, we could use the values where prem is defined:
#depths = seismic_model.internal_depth_list()
pressures, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
temperatures = burnman.geotherm.brown_shankland(pressures)
print "Calculations are done for:"
rock.debug_print()
#calculate the seismic velocities of the rock using a whole battery of averaging schemes:
# do the end members, here averaging scheme does not matter (though it defaults to Voigt-Reuss-Hill)
rho_pv, vp_pv, vs_pv, vphi_pv, K_pv, G_pv = \
burnman.velocities_from_rock(perovskitite, pressures, temperatures)
rho_fp, vp_fp, vs_fp, vphi_fp, K_fp, G_fp = \
burnman.velocities_from_rock(periclasite, pressures, temperatures)
#Voigt Reuss Hill averaging
rho_vrh, vp_vrh, vs_vrh, vphi_vrh, K_vrh, G_vrh = \
burnman.velocities_from_rock(rock, pressures, temperatures, averaging_scheme=burnman.averaging_schemes.voigt_reuss_hill())
#Voigt averaging
rho_v, vp_v, vs_v, vphi_v, K_v, G_v = \
burnman.velocities_from_rock(rock, pressures, temperatures, averaging_scheme=burnman.averaging_schemes.voigt())
#Reuss averaging
rho_r, vp_r, vs_r, vphi_r, K_r, G_r = \
burnman.velocities_from_rock(rock, pressures, temperatures, averaging_scheme=burnman.averaging_schemes.reuss())
#Upper bound for Hashin-Shtrikman averaging
rho_hsu, vp_hsu, vs_hsu, vphi_hsu, K_hsu, G_hsu = \
burnman.velocities_from_rock(rock, pressures, temperatures, averaging_scheme=burnman.averaging_schemes.hashin_shtrikman_upper())
#Lower bound for Hashin-Shtrikman averaging
rho_hsl, vp_hsl, vs_hsl, vphi_hsl, K_hsl, G_hsl = \
burnman.velocities_from_rock(rock, pressures, temperatures, averaging_scheme=burnman.averaging_schemes.hashin_shtrikman_lower())
#linear fit
vs_lin = vs_pv*amount_perovskite + vs_fp*(1.0-amount_perovskite)
# PLOTTING
# plot vs
ax = figure.add_subplot(1,1,1)
plt.plot(pressures/1.e9,vs_v/1.e3,color=colors.color(0),linewidth=2,linestyle='-',marker='^',\
markersize=4,label='Voigt')
plt.plot(pressures/1.e9,vs_r/1.e3,color=colors.color(5),linewidth=2,linestyle='-',marker='v',\
markersize=4,label='Reuss')
plt.plot(pressures/1.e9,vs_vrh/1.e3,color=colors.color(1),linestyle='-',marker='*',\
markersize=6,label='Voigt-Reuss-Hill')
    plt.fill_between(pressures/1.e9, vs_hsu/1.e3, vs_hsl/1.e3, facecolor='red', lw=0, interpolate=False)
#plt.plot(pressures/1.e9,vs_hsu/1.e3,color='r',linestyle='-',\
# markersize=4,label='Hashin-Shtrikman')
#plt.plot(pressures/1.e9,vs_hsl/1.e3,color='r',linestyle='-',marker='x',\
# markersize=4)
plt.plot(pressures/1.e9,vs_lin/1.e3,color='k',linewidth=2,linestyle='--',\
markersize=4,label='linear')
plt.plot(pressures/1.e9,vs_pv/1.e3,color=colors.color(2),linewidth=2,linestyle='-',marker='d',\
markersize=4,label='Mg Perovskite')
plt.plot(pressures/1.e9,vs_fp/1.e3,color=colors.color(4),linewidth=2,linestyle='-',marker='x',\
markersize=6,label=r'W\"ustite')
plt.ylim(3.0,7.5)
plt.xlim(min(pressures)/1.e9,max(pressures)/1.e9)
simArtist = plt.Line2D((0,1),(0,0), color='r', lw=5, linestyle='-')
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles[0:3]+[simArtist]+handles[3:], labels[0:3]+['Hashin-Shtrikman']+labels[3:], loc='lower right',ncol=2,prop=prop)
plt.xlabel('Pressure (GPa)')
plt.ylabel('Shear velocity $V_s$ (km/s)')
plt.savefig("example_averaging.pdf",bbox_inches='tight')
plt.show()
| gpl-2.0 |
evidation-health/bokeh | bokeh/compat/mplexporter/renderers/vincent_renderer.py | 64 | 1922 | import warnings
from .base import Renderer
from ..exporter import Exporter
class VincentRenderer(Renderer):
def open_figure(self, fig, props):
self.chart = None
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
def draw_line(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
linedata = {'x': data[:, 0],
'y': data[:, 1]}
line = vincent.Line(linedata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
line.scales['color'].range = [style['color']]
if self.chart is None:
self.chart = line
else:
warnings.warn("Multiple plot elements not yet supported")
def draw_markers(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
markerdata = {'x': data[:, 0],
'y': data[:, 1]}
markers = vincent.Scatter(markerdata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
markers.scales['color'].range = [style['facecolor']]
if self.chart is None:
self.chart = markers
else:
warnings.warn("Multiple plot elements not yet supported")
def fig_to_vincent(fig):
"""Convert a matplotlib figure to a vincent object"""
renderer = VincentRenderer()
exporter = Exporter(renderer)
exporter.run(fig)
return renderer.chart
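# Sketch of typical use (an assumption, not part of this module's API surface):
# given a matplotlib figure ``fig``,
#     chart = fig_to_vincent(fig)
#     chart.to_json('chart.json')
# would serialise the converted chart; ``to_json`` is the usual vincent
# Visualization method for writing Vega JSON.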
| bsd-3-clause |
pkruskal/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
phdowling/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
rs2/bokeh | bokeh/util/sampledata.py | 2 | 7117 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Helper functions for downloading and accessing sample data.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from bokeh.util.api import public, internal ; public, internal
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from os import mkdir, remove
from os.path import abspath, dirname, exists, expanduser, isdir, isfile, join, splitext
from sys import stdout
from zipfile import ZipFile
# External imports
import six
from six.moves.urllib.request import urlopen
# Bokeh imports
from .dependencies import import_required
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'download',
)
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
@public((1,0,0))
def download(progress=True):
''' Download larger data sets for various Bokeh examples.
'''
data_dir = external_data_dir(create=True)
print("Using data directory: %s" % data_dir)
s3 = 'https://s3.amazonaws.com/bokeh_data/'
files = [
(s3, 'CGM.csv'),
(s3, 'US_Counties.zip'),
(s3, 'us_cities.json'),
(s3, 'unemployment09.csv'),
(s3, 'AAPL.csv'),
(s3, 'FB.csv'),
(s3, 'GOOG.csv'),
(s3, 'IBM.csv'),
(s3, 'MSFT.csv'),
(s3, 'WPP2012_SA_DB03_POPULATION_QUINQUENNIAL.zip'),
(s3, 'gapminder_fertility.csv'),
(s3, 'gapminder_population.csv'),
(s3, 'gapminder_life_expectancy.csv'),
(s3, 'gapminder_regions.csv'),
(s3, 'world_cities.zip'),
(s3, 'airports.json'),
(s3, 'movies.db.zip'),
(s3, 'airports.csv'),
(s3, 'routes.csv'),
]
for base_url, filename in files:
_download_file(base_url, filename, data_dir, progress=progress)
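# Typical usage (a sketch): from a Python session,
#     import bokeh.sampledata
#     bokeh.sampledata.download()
# fetches the files listed above into the sample-data directory; pass
# ``progress=False`` to suppress the byte-count output.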
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
@internal((1,0,0))
def external_csv(module, name, **kw):
'''
'''
pd = import_required('pandas', '%s sample data requires Pandas (http://pandas.pydata.org) to be installed' % module)
return pd.read_csv(external_path(name), **kw)
@internal((1,0,0))
def external_data_dir(create=False):
'''
'''
try:
import yaml
except ImportError:
raise RuntimeError("'yaml' and 'pyyaml' are required to use bokeh.sampledata functions")
bokeh_dir = _bokeh_dir(create=create)
data_dir = join(bokeh_dir, "data")
try:
config = yaml.load(open(join(bokeh_dir, 'config')))
data_dir = expanduser(config['sampledata_dir'])
except (IOError, TypeError):
pass
if not exists(data_dir):
if not create:
raise RuntimeError('bokeh sample data directory does not exist, please execute bokeh.sampledata.download()')
print("Creating %s directory" % data_dir)
try:
mkdir(data_dir)
except OSError:
raise RuntimeError("could not create bokeh data directory at %s" % data_dir)
else:
if not isdir(data_dir):
raise RuntimeError("%s exists but is not a directory" % data_dir)
return data_dir
@internal((1,0,0))
def external_path(filename):
data_dir = external_data_dir()
fn = join(data_dir, filename)
    if not (exists(fn) and isfile(fn)):
        raise RuntimeError('Could not locate external data file %s. Please execute bokeh.sampledata.download()' % fn)
return fn
@internal((1,0,0))
def package_csv(module, name, **kw):
'''
'''
pd = import_required('pandas', '%s sample data requires Pandas (http://pandas.pydata.org) to be installed' % module)
return pd.read_csv(package_path(name), **kw)
@internal((1,0,0))
def package_dir():
'''
'''
return abspath(join(dirname(__file__), "..", "sampledata", "_data"))
@internal((1,0,0))
def package_path(filename):
'''
'''
return join(package_dir(), filename)
@internal((1,0,0))
def open_csv(filename):
'''
'''
# csv differs in Python 2.x and Python 3.x. Open the file differently in each.
if six.PY2:
return open(filename, 'rb')
else:
return open(filename, 'r', newline='', encoding='utf8')
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _bokeh_dir(create=False):
'''
'''
bokeh_dir = join(expanduser("~"), ".bokeh")
if not exists(bokeh_dir):
if not create: return bokeh_dir
print("Creating %s directory" % bokeh_dir)
try:
mkdir(bokeh_dir)
except OSError:
raise RuntimeError("could not create bokeh config directory at %s" % bokeh_dir)
else:
if not isdir(bokeh_dir):
raise RuntimeError("%s exists but is not a directory" % bokeh_dir)
return bokeh_dir
def _download_file(base_url, filename, data_dir, progress=True):
'''
'''
file_url = join(base_url, filename)
file_path = join(data_dir, filename)
url = urlopen(file_url)
with open(file_path, 'wb') as file:
file_size = int(url.headers["Content-Length"])
print("Downloading: %s (%d bytes)" % (filename, file_size))
fetch_size = 0
block_size = 16384
while True:
data = url.read(block_size)
if not data:
break
fetch_size += len(data)
file.write(data)
if progress:
status = "\r%10d [%6.2f%%]" % (fetch_size, fetch_size*100.0/file_size)
stdout.write(status)
stdout.flush()
if progress:
print()
real_name, ext = splitext(filename)
if ext == '.zip':
if not splitext(real_name)[1]:
real_name += ".csv"
print("Unpacking: %s" % real_name)
with ZipFile(file_path, 'r') as zip_file:
zip_file.extract(real_name, data_dir)
remove(file_path)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/cross_validation.py | 5 | 61899 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
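# Illustrative sketch (not part of scikit-learn): a subclass only needs to yield
# test indices; _PartitionIterator derives the complementary train indices. The
# odd/even split below is arbitrary and exists purely to show the contract, e.g.
# ``for train, test in _EveryOtherSampleSplit(10): ...``
class _EveryOtherSampleSplit(_PartitionIterator):
    """Toy CV iterator: first the odd-indexed samples, then the even-indexed ones."""
    def _iter_test_indices(self):
        yield np.arange(1, self.n, 2)
        yield np.arange(0, self.n, 2)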
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
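# Editor's note: an added sanity check (assumed helper, not original code) for
# the fold sizes described in the Notes above; with n=5 and n_folds=3 the first
# n % n_folds folds receive one extra sample.
def _example_kfold_fold_sizes():
    sizes = [len(test) for _, test in KFold(5, n_folds=3)]
    assert sizes == [2, 2, 1]
    return sizes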
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels, while the latter uses samples
    that are all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
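# Editor's note: an added illustration (assumed helper, not original code) of
# how _validate_shuffle_split resolves float/int/None sizes into absolute
# sample counts.
def _example_validate_shuffle_split():
    # 10 samples, 25% test -> 3 test samples, train defaults to the complement
    n_train, n_test = _validate_shuffle_split(10, 0.25, None)
    assert (n_train, n_test) == (7, 3)
    return n_train, n_test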
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
            # divisors of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
    the former generates splits using all subsets of ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
    Note: The parameters ``test_size`` and ``train_size`` refer to labels,
    not to samples as they do in ShuffleSplit.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
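# Editor's note: LabelShuffleSplit has no doctest above, so this added helper
# (an editor's assumption, not part of the original API) sketches its use and
# checks that no label ever appears on both sides of a split.
def _example_label_shuffle_split():
    import numpy as np
    labels = np.array([1, 1, 2, 2, 3, 3, 4, 4])
    lss = LabelShuffleSplit(labels, n_iter=3, test_size=0.5, random_state=0)
    for train, test in lss:
        assert not set(labels[train]) & set(labels[test])
    return lss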
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at least 2 dimensions.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
        This generator must assign every sample to exactly one test set.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
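# Editor's note: a minimal usage sketch (assumed data and estimator, not part
# of the original docstring) for cross_val_predict defined above.
def _example_cross_val_predict():
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    X = np.array([[0.], [1.], [2.], [3.], [4.], [5.]])
    y = np.array([0, 0, 0, 1, 1, 1])
    preds = cross_val_predict(LogisticRegression(), X, y, cv=3)
    assert preds.shape == (6,)  # one out-of-fold prediction per sample
    return preds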
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
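# Editor's note: an added quick check (assumed helper, not original code) for
# _check_is_partition above.
def _example_check_is_partition():
    import numpy as np
    assert _check_is_partition(np.array([2, 0, 1]), 3)       # a reordering
    assert not _check_is_partition(np.array([0, 0, 1]), 3)   # index 2 missing
    return True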
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at least 2 dimensions.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
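# Editor's note: a minimal usage sketch (assumed data and estimator, not part
# of the original docstring) for cross_val_score defined above.
def _example_cross_val_score():
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    X = np.array([[0.], [1.], [2.], [3.], [4.], [5.]])
    y = np.array([0, 0, 0, 1, 1, 1])
    scores = cross_val_score(LogisticRegression(), X, y, cv=3)
    assert scores.shape == (3,)  # one score per fold
    return scores.mean()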
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
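# Editor's note: an added illustration (assumed helper and data, not original
# code) of the precomputed-kernel path in _safe_split: for a pairwise
# estimator the test rows are sliced against the train columns.
def _example_safe_split_precomputed():
    import numpy as np
    from sklearn.svm import SVC
    A = np.arange(12.).reshape(4, 3)
    X = np.dot(A, A.T)  # 4x4 precomputed Gram matrix
    y = np.array([0, 1, 0, 1])
    train, test = np.array([0, 1, 2]), np.array([3])
    clf = SVC(kernel='precomputed')
    X_tr, _ = _safe_split(clf, X, y, train)
    X_te, _ = _safe_split(clf, X, y, test, train)
    assert X_tr.shape == (3, 3) and X_te.shape == (1, 3)
    return X_tr, X_te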
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
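# Editor's note: an added illustration (assumed helper, not original code)
# showing that _shuffle only permutes y within groups sharing the same label
# when labels are given.
def _example_shuffle_within_labels():
    import numpy as np
    rng = np.random.RandomState(0)
    y = np.array([10, 11, 20, 21])
    labels = np.array([0, 0, 1, 1])
    y_shuffled = _shuffle(y, labels, rng)
    assert set(y_shuffled[:2]) == set([10, 11])
    assert set(y_shuffled[2:]) == set([20, 21])
    return y_shuffled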
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold is used, or another object that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
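# Editor's note: an added illustration (assumed helper, not original code) of
# how check_cv picks StratifiedKFold for classification targets and KFold
# otherwise.
def _example_check_cv():
    import numpy as np
    X = np.zeros((6, 2))
    y = np.array([0, 1, 0, 1, 0, 1])
    assert isinstance(check_cv(3, X, y, classifier=True), StratifiedKFold)
    assert isinstance(check_cv(3, X, y, classifier=False), KFold)
    return True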
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
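# Editor's note: a minimal usage sketch (assumed toy data, not part of the
# original docstring) for permutation_test_score defined above.
def _example_permutation_test_score():
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = rng.rand(30, 2)
    y = np.array([0, 1] * 15)
    score, perm_scores, pvalue = permutation_test_score(
        LogisticRegression(), X, y, cv=3, n_permutations=20, random_state=0)
    assert perm_scores.shape == (20,)
    return score, pvalue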
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
| bsd-3-clause |
voxlol/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
        # As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
zedoul/AnomalyDetection | generate_plots.py | 1 | 16040 | # -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
"""
print (__doc__)
import numpy as np
import copy
import cPickle as pickle
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import gridspec
import nslkdd.preprocessing as preprocessing
import nslkdd.data.model as model
from nslkdd.get_kdd_dataframe import attack_types
from nslkdd.get_kdd_dataframe import df_by_attack_type
import colorhex
import util
import logger
today = util.make_today_folder('./results')
today = "./results/2015-02-09"
plot_lim_max = 21
plot_lim_min = -21
def plot_true_labels(ax, data_per_true_labels, title="", highlight_point = None):
ax.set_title("True labels")
for i, p in enumerate(data_per_true_labels) :
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
if i == model.attack_normal:
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
elif i != model.attack_normal and i != highlight_point:
colors = ['r'] * len(x)
ax.scatter(x, y, c=colors)
if highlight_point != None :
p = data_per_true_labels[highlight_point]
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
colors = ['y'] * len(x)
ax.scatter(x, y, c=colors)
def plot_normal_label(ax, data_per_true_labels, title=""):
ax.set_title(title)
for i, p in enumerate(data_per_true_labels) :
x = [t[0] for t in p]
y = [t[1] for t in p]
x = np.array(x)
y = np.array(y)
if i == model.attack_normal:
ax.scatter(x, y, c='g')
logger.debug("* mean/std of normal")
logger.debug(len(x))
logger.debug(np.mean(x))
logger.debug(np.mean(y))
logger.debug(np.std(x))
logger.debug(np.std(y))
def plot_abnormal_label(ax, data_per_true_labels, title=""):
ax.set_title(title)
for i, p in enumerate(data_per_true_labels) :
x = [t[0] for t in p]
y = [t[1] for t in p]
x = np.array(x)
y = np.array(y)
if i != model.attack_normal:
ax.scatter(x, y, c='r')
def get_data(title):
with open(today+'/'+title+'_cproj.pkl','rb') as input:
cproj = pickle.load(input)
with open(today+'/'+title+'_res.pkl','rb') as input:
res = pickle.load(input)
with open(today+'/'+title+'_df.pkl','rb') as input:
df = pickle.load(input)
with open(today+'/'+title+'_highlight_point.pkl','rb') as input:
highlight_point = pickle.load(input)
return cproj, res, df, highlight_point
def gen_plot(cproj, res, df, highlight_point, title):
_, attacks = preprocessing.get_header_data()
# figure setting
fig, axarr = plt.subplots(4, 4, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
# plt.xlabel('interval')
# plt.ylabel('log(probability) + k')
# plt.title('Convergence plot')
# plt.grid(True)
data_per_true_labels = []
for i in range( len(attacks) ):
data_per_true_labels.append([])
true_attack_types = df["attack"].values.tolist()
for i, d in enumerate(cproj):
data_per_true_labels[true_attack_types[i]].append(d)
    k = int( len(cproj) * 12/500.0)  # number of clusters scales with dataset size: roughly 12 clusters per 500 points
clusters = [0] * k
cluster_xs = []
cluster_ys = []
for i in range(k):
cluster_xs.append([])
cluster_ys.append([])
cluster_xmeans = [0] * k
cluster_ymeans = [0] * k
cluster_xstds = [0] * k
cluster_ystds = [0] * k
    # vote on each cluster's label (+1 per normal point, -1 per attack) and collect its member coordinates
    for i, p in enumerate(cproj):
true_label = true_attack_types[i]
if true_label == model.attack_normal :
clusters[ res[i] ] = clusters[ res[i] ] + 1
else :
clusters[ res[i] ] = clusters[ res[i] ] - 1
cluster_xs[ res[i] ].append(p[0])
cluster_ys[ res[i] ].append(p[1])
logger.debug("* mean/std of cluster")
for i, cluster in enumerate(clusters) :
cluster_xmeans[i] = np.mean(cluster_xs[i])
cluster_ymeans[i] = np.mean(cluster_ys[i])
cluster_xstds[i] = np.std(cluster_xs[i])
cluster_ystds[i] = np.std(cluster_ys[i])
logger.debug("cluster : " + str(i))
logger.debug("- size [" + str(len(cluster_xs[i])) + "]")
logger.debug("- xmin [" + str(cluster_xmeans[i]) + "]")
logger.debug("- ymin [" + str(cluster_ymeans[i]) + "]")
logger.debug("- xstd [" + str(cluster_xstds[i]) + "]")
logger.debug("- ystd [" + str(cluster_ystds[i]) + "]")
ax1 = axarr[0, 0]
ax2 = axarr[0, 1]
ax3 = axarr[0, 2]
ax4 = axarr[0, 3]
ax5 = axarr[1, 0]
ax6 = axarr[1, 1]
ax7 = axarr[1, 2]
ax8 = axarr[1, 3]
ax9 = axarr[2, 0]
ax10 = axarr[2, 1]
ax11 = axarr[2, 2]
ax12 = axarr[2, 3]
ax13 = axarr[3, 0]
ax14 = axarr[3, 1]
ax15 = axarr[3, 2]
ax16 = axarr[3, 3]
plot_true_labels(ax1, data_per_true_labels, "True labels", highlight_point)
plot_normal_label(ax2, data_per_true_labels, "True normals")
plot_abnormal_label(ax3, data_per_true_labels, "True abnormal")
ax4.set_title("k-means")
for i, p in enumerate(cproj):
ax4.scatter(p[0], p[1], c=colorhex.codes[ res[i] ])
##############################################################
ax5.set_title("Normal res")
for i, p in enumerate(cproj):
if clusters[ res[i] ] >= 0 :
ax5.scatter(p[0], p[1], c='g')
##############################################################
ax6.set_title("Abnormal res")
for i, p in enumerate(cproj):
if clusters[ res[i] ] < 0 :
ax6.scatter(p[0], p[1], c='r')
##############################################################
ax7.set_title("Cluster 1")
for i, p in enumerate(cproj):
if res[i] == 0 :
ax7.scatter(p[0], p[1], c='g')
##############################################################
ax8.set_title("Cluster 2")
for i, p in enumerate(cproj):
if res[i] == 1 :
ax8.scatter(p[0], p[1], c='g')
##############################################################
# ax9.set_title("kmeans")
# kmean_plot(title, ax9)
##############################################################
ax9.set_title("Cluster 3")
for i, p in enumerate(cproj):
if res[i] == 2 :
ax9.scatter(p[0], p[1], c='g')
##############################################################
ax10.set_title("Cluster 4")
for i, p in enumerate(cproj):
if res[i] == 3 :
ax10.scatter(p[0], p[1], c='g')
##############################################################
ax11.set_title("Cluster 5")
for i, p in enumerate(cproj):
if res[i] == 4 :
ax11.scatter(p[0], p[1], c='g')
##############################################################
ax12.set_title("Cluster 6")
for i, p in enumerate(cproj):
if res[i] == 5 :
ax12.scatter(p[0], p[1], c='g')
##############################################################
ax13.set_title("Cluster 7")
for i, p in enumerate(cproj):
if res[i] == 6 :
ax13.scatter(p[0], p[1], c='g')
##############################################################
ax14.set_title("Cluster 8")
for i, p in enumerate(cproj):
if res[i] == 7 :
ax14.scatter(p[0], p[1], c='g')
##############################################################
ax15.set_title("Cluster 9")
for i, p in enumerate(cproj):
if res[i] == 8 :
ax15.scatter(p[0], p[1], c='g')
##############################################################
ax16.set_title("Cluster 10")
for i, p in enumerate(cproj):
if res[i] == 9 :
ax16.scatter(p[0], p[1], c='g')
##############################################################
    fig.savefig(today + "/" + title + ".png")
    print title + " has been saved"
plt.close()
fig, ax = plt.subplots(1, 1, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
for i, p in enumerate(data_per_true_labels) :
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
if i == model.attack_normal:
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
elif i != model.attack_normal and i != highlight_point:
colors = ['r'] * len(x)
ax.scatter(x, y, c=colors)
    if highlight_point is not None :
p = data_per_true_labels[highlight_point]
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
colors = ['y'] * len(x)
ax.scatter(x, y, c=colors)
plt.xlabel('Similarity score to normal')
plt.ylabel('Similarity score to abnormal')
plt.title('True labels')
plt.grid(True)
fig.savefig(today + "/" + title + "_true_.png")
plt.close()
fig, ax = plt.subplots(1, 1, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
for i, p in enumerate(cproj):
if clusters[ res[i] ] >= 0 :
ax.scatter(p[0], p[1], c='g')
else :
ax.scatter(p[0], p[1], c='r')
plt.xlabel('Similarity score to normal')
plt.ylabel('Similarity score to abnormal')
    plt.title('Predicted labels')
plt.grid(True)
fig.savefig(today + "/" + title + "_prediction_.png")
plt.close()
def gen_plots():
dataset_description = "training20_only"
title = dataset_description
cproj, res, df, highlight_point = get_data(title)
gen_plot(cproj, res, df, highlight_point, title)
dataset_description = "training20_test20"
for attack_type_index, attack_type in enumerate(model.attack_types) :
        if attack_type_index == model.attack_normal :  # skip the normal class; each remaining attack type gets its own plot
continue
title = dataset_description + "_" + attack_type
cproj, res, df, highlight_point = get_data(title)
gen_plot(cproj, res, df, highlight_point, title)
def gen_one_plot():
dataset_description = "training20_test20_guess_passwd"
title = dataset_description
cproj, res, df, highlight_point = get_data(title)
gen_plot(cproj, res, df, highlight_point, title)
def kmean_plot(title, ax):
_, attacks = preprocessing.get_header_data()
cproj, res, df, highlight_point = get_data(title)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
# plt.xlim(plot_lim_min, plot_lim_max)
# plt.ylim(plot_lim_min, plot_lim_max)
# ax = axarr
# ax.set_title("plot")
data_per_true_labels = []
for i in range( len(attacks) ):
data_per_true_labels.append([])
true_attack_types = df["attack"].values.tolist()
for i, d in enumerate(cproj):
data_per_true_labels[true_attack_types[i]].append(d)
k = 10
clusters = [0] * k
for i, p in enumerate(cproj):
true_label = true_attack_types[i]
if true_label == model.attack_normal :
clusters[ res[i] ] = clusters[ res[i] ] + 1
else :
clusters[ res[i] ] = clusters[ res[i] ] - 1
x = []
y = []
p = []
for ii, pp in enumerate(cproj):
if clusters[ res[ii] ] > 0 :
x.append(pp[0])
y.append(pp[1])
p.append(pp)
from sklearn.cluster import KMeans
data = p
h = .02
estimator = KMeans(init='k-means++', n_clusters=3)
estimator.fit(data)
centroids = estimator.cluster_centers_
    x_min, x_max = min(x) - 1, max(x) + 1  # pad the mesh slightly beyond the data range
    y_min, y_max = min(y) - 1, max(y) + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
    plt.imshow(Z, interpolation='nearest',
               extent=(xx.min(), xx.max(), yy.min(), yy.max()),
               cmap=plt.cm.Paired,
               aspect='auto', origin='lower')
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
ax.scatter(np.mean(x), np.mean(y), c='r')
ax.scatter(np.median(x), np.median(y), c='b')
delta = 0.025
X = np.arange(plot_lim_min, plot_lim_max, delta)
Y = np.arange(plot_lim_min, plot_lim_max, delta)
X,Y = np.meshgrid(X,Y)
Z = mlab.bivariate_normal(X, Y, np.std(x), np.std(y), np.mean(x), np.mean(y))
plt.contour(X,Y,Z)
def test():
_, attacks = preprocessing.get_header_data()
dataset_description = "training20_only"
title = dataset_description
cproj, res, df, highlight_point = get_data(title)
fig, axarr = plt.subplots(1, 1, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
ax = axarr
ax.set_title("plot")
data_per_true_labels = []
for i in range( len(attacks) ):
data_per_true_labels.append([])
true_attack_types = df["attack"].values.tolist()
for i, d in enumerate(cproj):
data_per_true_labels[true_attack_types[i]].append(d)
for i, p in enumerate(data_per_true_labels) :
x = np.array([t[0] for t in p])
y = np.array([t[1] for t in p])
if i == model.attack_normal:
from sklearn.cluster import KMeans
data = p
h = .02
estimator = KMeans(init='k-means++', n_clusters=3)
estimator.fit(data)
centroids = estimator.cluster_centers_
            x_min, x_max = min(x) - 1, max(x) + 1  # pad the mesh slightly beyond the data range
            y_min, y_max = min(y) - 1, max(y) + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plt.figure(1)
# plt.clf()
            plt.imshow(Z, interpolation='nearest',
                       extent=(xx.min(), xx.max(), yy.min(), yy.max()),
                       cmap=plt.cm.Paired,
                       aspect='auto', origin='lower')
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
colors = ['g'] * len(x)
ax.scatter(x, y, c=colors)
ax.scatter(np.mean(x), np.mean(y), c='r')
ax.scatter(np.median(x), np.median(y), c='b')
delta = 0.025
X = np.arange(plot_lim_min, plot_lim_max, delta)
Y = np.arange(plot_lim_min, plot_lim_max, delta)
X,Y = np.meshgrid(X,Y)
Z = mlab.bivariate_normal(X, Y, np.std(x), np.std(y), np.mean(x), np.mean(y))
plt.contour(X,Y,Z)
# for i, r in df.iterrows() :
# if r['attack']
# for i, p in enumerate(cproj):
# if res[i] == 8 :
# ax1.scatter(p[0], p[1], c='g')
# plt.xticks(())
# plt.yticks(())
plt.show()
plt.close()
if __name__ == '__main__':
""" Anomaly detection with spectral clustering algorithm.
First training set only, to see what would happen with only known classes
Next with test set, to see what would happen with only unknown classes
"""
import time
start = time.time()
logger.set_file(today + "/log_plots.txt")
gen_plots()
# gen_one_plot()
# test()
elapsed = (time.time() - start)
print "done in %s seconds" % (elapsed)
| mit |
rajat1994/scikit-learn | setup.py | 143 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
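# Hedged illustration (the real code lives in sklearn/__init__.py, not here):
# the package __init__ typically consumes this flag along the lines of
#
#   try:
#       __SKLEARN_SETUP__
#   except NameError:
#       __SKLEARN_SETUP__ = False
#   if __SKLEARN_SETUP__:
#       sys.stderr.write('Partial import of sklearn during the build process.\n')
#   else:
#       from . import __check_build
#
# so that importing sklearn below (only to read its version number) does not try
# to load compiled extensions that have not been built yet.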
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
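# Hypothetical usage (not part of this file): with wheelhouse_uploader installed
# and the artifact repository URLs configured in setup.cfg, a release manager
# would invoke the two commands registered above roughly as
#   python setup.py fetch_artifacts
#   python setup.py upload_all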
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
clbe/note_cgsp | openfisca_cgsp/cgsp_scripts/graph_revdisp.py | 1 | 2138 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import datetime
from openfisca_core import periods
import openfisca_france
from openfisca_qt.matplotlib import graphs
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def show_revdisp(year = 2013, max_sal = 30000, people = 1, filename = None):
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = periods.period('year', year),
parent1 = dict(
birth = datetime.date(year - 40, 1, 1),
sali = max_sal),
parent2 = dict(birth = datetime.date(year - 40, 1, 1)) if people >= 2 else None,
enfants = [
dict(birth = datetime.date(year - 9, 1, 1)) if people >= 3 else None,
dict(birth = datetime.date(year - 9, 1, 1)) if people >= 4 else None,
] if people >= 3 else None,
).new_simulation(debug = True, reference = True)
fig = plt.figure()
axes = plt.gca()
graphs.draw_waterfall(
simulation = simulation,
axes = axes,
visible = ["revdisp", "sal"],
)
plt.show()
if filename:
fig.savefig('{}.png'.format(filename))
if __name__ == '__main__':
show_revdisp(filename = "waterfall")
| agpl-3.0 |
Srisai85/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |