repo_name (string, 6–112 chars) | path (string, 4–204 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 714–810k chars) | license (15 classes) |
---|---|---|---|---|---|
bavardage/statsmodels | statsmodels/examples/ex_univar_kde.py | 8 | 5089 | """
This example tests the nonparametric estimator
for several popular univariate distributions with the different
bandwidth selction methods - CV-ML; CV-LS; Scott's rule of thumb.
Produces six different plots for each distribution
1) Beta
2) f
3) Pareto
4) Laplace
5) Weibull
6) Poisson
"""
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
KDEMultivariate = sm.nonparametric.KDEMultivariate
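# Each distribution block below draws a random sample, fits three kernel
# density estimates with different bandwidth selectors ('normal_reference'
# is Scott's rule of thumb, 'cv_ls' is least-squares cross-validation and
# 'cv_ml' is maximum-likelihood cross-validation) and plots them against
# the true density of the sampled distribution.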
np.random.seed(123456)
# Beta distribution
# Parameters
a = 2
b = 5
nobs = 250
support = np.random.beta(a, b, size=nobs)
rv = stats.beta(a, b)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(1)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Beta Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# f distribution
df = 100
dn = 100
nobs = 250
support = np.random.f(dn, df, size=nobs)
rv = stats.f(df, dn)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(2)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of f Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Pareto distribution
a = 2
nobs = 150
support = np.random.pareto(a, size=nobs)
rv = stats.pareto(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(3)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Pareto " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Laplace Distribution
mu = 0
s = 1
nobs = 250
support = np.random.laplace(mu, s, size=nobs)
rv = stats.laplace(mu, s)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(4)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Laplace " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Weibull Distribution
a = 1
nobs = 250
support = np.random.weibull(a, size=nobs)
rv = stats.weibull_min(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(5)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Weibull " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Poisson Distribution
a = 2
nobs = 250
support = np.random.poisson(a, size=nobs)
rv = stats.poisson(a)
ix = np.argsort(support)
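# The Poisson sample is discrete, so the KDE uses the ordered discrete
# variable type 'o' rather than the continuous 'c' used above.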
dens_normal = KDEMultivariate(data=[support], var_type='o', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='o', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='o', bw='cv_ml')
plt.figure(6)
plt.plot(support[ix], rv.pmf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Poisson " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
plt.show()
| bsd-3-clause |
AndrewRook/NFLWin | setup.py | 1 | 2348 | import os
import re
import tarfile
import warnings
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
###################################################################
#Boilerplate I modified from the internet
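# Read the package version out of nflwin/_version.py by matching the
# __version__ assignment with a regular expression, so setup.py does not
# have to import the (possibly not-yet-installed) package.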
VERSION_FILE = "nflwin/_version.py"
version_string = open(VERSION_FILE, "r").read()
version_re = r"^__version__ = [u]{0,1}['\"]([^'\"]*)['\"]"
version_match = re.search(version_re, version_string, re.M)
if version_match:
VERSION = version_match.group(1)
else:
raise RuntimeError("Unable to find version string in {0}".format(VERSION_FILE))
NAME = "nflwin"
PACKAGES = find_packages(where=".")
META_PATH = os.path.join(NAME, "__init__.py")
KEYWORDS = ['NFL','WP','Win Probability']
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
]
INSTALL_REQUIRES = ['numpy',
'scipy',
'pandas',
'scikit-learn',
'joblib',]
EXTRAS_REQUIRE = {
"plotting": ["matplotlib"],
"nfldb": ["nfldb", "sqlalchemy"],
"dev": ["matplotlib", "nfldb", "sqlalchemy", "pytest", "pytest-cov", "sphinx", "numpydoc"]
}
PACKAGE_DATA = {"nflwin": ["models/default_model.nflwin*"]}
HERE = os.path.abspath(os.path.dirname(__file__))
README = None
with open(os.path.join(HERE, 'README.rst'),'r') as f:
README = f.read()
###################################################################
if __name__ == "__main__":
setup(
name=NAME,
description='A Python implementation of NFL Win Probability (WP)',
license='MIT',
url='https://github.com/AndrewRook/NFLWin',
version=VERSION,
author='Andrew Schechtman-Rook',
author_email='footballastronomer@gmail.com',
maintainer='Andrew Schechtman-Rook',
maintainer_email='footballastronomer@gmail.com',
keywords=KEYWORDS,
long_description=README,
packages=PACKAGES,
package_data=PACKAGE_DATA,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE
)
| mit |
fschill/mavue | pymavlink/tools/mavgpslag.py | 43 | 3446 | #!/usr/bin/env python
'''
calculate GPS lag from DF log
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--plot", action='store_true', default=False, help="plot errors")
parser.add_argument("--minspeed", type=float, default=6, help="minimum speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.mavextra import *
from pymavlink.rotmat import Vector3, Matrix3
'''
Support having a $HOME/.pymavlink/mavextra.py for extra graphing functions
'''
home = os.getenv('HOME')
if home is not None:
extra = os.path.join(home, '.pymavlink', 'mavextra.py')
if os.path.exists(extra):
import imp
mavuser = imp.load_source('pymavlink.mavuser', extra)
from pymavlink.mavuser import *
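# Lag estimation: for each pair of consecutive GPS velocity samples,
# integrate the earth-frame accelerometer data recorded between them and
# compare the integrated velocity change with the change reported by the
# GPS. Repeating this with the accelerometer window shifted by whole IMU
# samples and picking the shift with the smallest mismatch gives the GPS
# lag in IMU sample periods.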
def velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=0):
'''return summed velocity error'''
sum = 0
count = 0
for i in range(0, len(vel)-1):
dv = vel[i+1] - vel[i]
da = Vector3()
for idx in range(1+accel_indexes[i]-shift, 1+accel_indexes[i+1]-shift):
da += gaccel[idx]
dt1 = timestamps[i+1] - timestamps[i]
dt2 = (accel_indexes[i+1] - accel_indexes[i]) * imu_dt
da *= imu_dt
da *= dt1/dt2
#print(accel_indexes[i+1] - accel_indexes[i])
ex = abs(dv.x - da.x)
ey = abs(dv.y - da.y)
sum += 0.5*(ex+ey)
count += 1
if count == 0:
return None
return sum/count
def gps_lag(logfile):
'''work out gps velocity lag times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
timestamps = []
vel = []
gaccel = []
accel_indexes = []
ATT = None
IMU = None
dtsum = 0
dtcount = 0
while True:
m = mlog.recv_match(type=['GPS','IMU','ATT'])
if m is None:
break
t = m.get_type()
if t == 'GPS' and m.Status==3 and m.Spd>args.minspeed:
v = Vector3(m.Spd*cos(radians(m.GCrs)), m.Spd*sin(radians(m.GCrs)), m.VZ)
vel.append(v)
timestamps.append(m._timestamp)
accel_indexes.append(max(len(gaccel)-1,0))
elif t == 'ATT':
ATT = m
elif t == 'IMU':
if ATT is not None:
gaccel.append(earth_accel_df(m, ATT))
if IMU is not None:
dt = m._timestamp - IMU._timestamp
dtsum += dt
dtcount += 1
IMU = m
imu_dt = dtsum / dtcount
print("Loaded %u samples imu_dt=%.3f" % (len(vel), imu_dt))
besti = -1
besterr = 0
delays = []
errors = []
for i in range(0,100):
err = velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=i)
if err is None:
break
errors.append(err)
delays.append(i*imu_dt)
if besti == -1 or err < besterr:
besti = i
besterr = err
print("Best %u (%.3fs) %f" % (besti, besti*imu_dt, besterr))
if args.plot:
import matplotlib.pyplot as plt
plt.plot(delays, errors, 'bo-')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,y2))
plt.ylabel('Error')
plt.xlabel('Delay(s)')
plt.show()
for filename in args.logs:
gps_lag(filename)
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/manifold/setup.py | 99 | 1243 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
imaculate/scikit-learn | examples/decomposition/plot_pca_iris.py | 65 | 1485 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
mengxn/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ishank08/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 58 | 7229 | # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
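# (mean of log(1 + exp(-y * (X w + intercept))) plus the L2 penalty
# ||w||^2 / (2 * C * n_samples); this appears to match scikit-learn's
# per-sample parametrisation of the regularised logistic objective)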
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
def get_max_squared_sum(X):
"""Get the maximum row-wise sum of squares"""
return np.sum(X ** 2, axis=1).max()
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size as in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
anntzer/scikit-learn | asv_benchmarks/benchmarks/utils.py | 12 | 1380 | import numpy as np
from sklearn.metrics import balanced_accuracy_score, r2_score
def neg_mean_inertia(X, labels, centers):
return - (np.asarray(X - centers[labels])**2).sum(axis=1).mean()
def make_gen_classif_scorers(caller):
caller.train_scorer = balanced_accuracy_score
caller.test_scorer = balanced_accuracy_score
def make_gen_reg_scorers(caller):
caller.test_scorer = r2_score
caller.train_scorer = r2_score
def neg_mean_data_error(X, U, V):
return - np.sqrt(((X - U.dot(V))**2).mean())
def make_dict_learning_scorers(caller):
caller.train_scorer = lambda _, __: (
neg_mean_data_error(caller.X,
caller.estimator.transform(caller.X),
caller.estimator.components_))
caller.test_scorer = lambda _, __: (
neg_mean_data_error(caller.X_val,
caller.estimator.transform(caller.X_val),
caller.estimator.components_))
def explained_variance_ratio(Xt, X):
return np.var(Xt, axis=0).sum() / np.var(X, axis=0).sum()
def make_pca_scorers(caller):
caller.train_scorer = (
lambda _, __: caller.estimator.explained_variance_ratio_.sum())
caller.test_scorer = lambda _, __: (
explained_variance_ratio(caller.estimator.transform(caller.X_val),
caller.X_val))
| bsd-3-clause |
PrashntS/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
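# staged_decision_function yields the model's predictions after each
# boosting stage, so clf.loss_ can be evaluated on the test set as a
# function of the number of boosting iterations.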
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
videlec/sage-flatsurf | flatsurf/graphical/surface.py | 1 | 42816 | r"""
EXAMPLES::
sage: import flatsurf
sage: flatsurf.translation_surfaces.veech_2n_gon(4).plot()
Graphics object consisting of 18 graphics primitives
"""
#*****************************************************************************
# Copyright (C) 2013-2019 Vincent Delecroix <20100.delecroix@gmail.com>
# 2013-2019 W. Patrick Hooper <wphooper@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import, print_function, division
from six.moves import range, map, filter, zip
from six import iteritems
from flatsurf.geometry.similarity_surface import SimilaritySurface
from .polygon import *
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.modules.free_module_element import vector
class GraphicalSurface:
r"""
This class manages the rendering of a SimilaritySurface.
This class essentially consists of a collection of GraphicalPolygons which
control how individual polygons are positioned. In addition, this class
stores options which are passed to the polygons when they are rendered.
Some setup features set in the constructor and can be set again later via
`process_options()`.
The basic tasks of the class are to render the polygons, edges and labels.
To customize a rendering, it is useful to know something about how this
class works. (Apologies!)
There are attributes which control whether or not certain objects are
rendered, namely:
- `will_plot_polygons` -- Whether to plot polygons which are right-side up.
- `will_plot_upside_down_polygons` -- Whether to plot polygons which are
upside down. Defaults to False.
- `will_plot_polygon_labels` -- Whether to plot polygon labels.
- `will_plot_edges` -- If this is False then no edges will be plotted.
- `will_plot_non_adjacent_edges` -- Whether to plot polygon edges which are
not adjacent to the edge it is glued to.
- `will_plot_adjacent_edges` -- Whether to plot polygon edges which are
adjacent to the polygon they are glued to.
- `will_plot_self_glued_edges` -- Whether to plot polygon edges which are
glued to themselves.
- `will_plot_edge_labels` -- Whether to plot polygon edge labels.
- `will_plot_zero_flags` -- Whether to plot line segments from the
barycenter to the zero vertex of each polygon. Useful in working out
vertex and edge labels. Defaults to False.
The `plot` method calls some other built in methods: `plot_polygon`,
`plot_polygon_label`, `plot_edge` and `plot_edge_label`. These in turn
call methods in `GraphicalPolygon`.
- `polygon_options` -- Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_polygon` when
plotting a polygon right-side up.
- `upside_down_polygon_options` -- Options passed to
:func:`graphical_polygon.GraphicalPolygon.plot_polygon` when plotting a polygon upside-down.
- `polygon_label_options` -- Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_label`
when plotting a polygon label.
- `non_adjacent_edge_options` -- Options passed to
:func:`graphical_polygon.GraphicalPolygon.plot_edge` when plotting a polygon edge which is not
adjacent to the edge it is glued to.
- `self.adjacent_edge_options` -- Options passed to
:func:`graphical_polygon.GraphicalPolygon.plot_edge` when plotting a polygon edge which is
adjacent to the edge it is glued to.
- `self_glued_edge_options` -- Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_edge`
when plotting a polygon edge which is glued to itself.
- `edge_label_options` -- Options passed to :func:`graphical_polygon.GraphicalPolygon.edge_label`
when plotting a edge label.
- `zero_flag_options` -- Options passed to
:func:`graphical_polygon.GraphicalPolygon.plot_zero_flag` when plotting a zero_flag.
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.graphical.surface import GraphicalSurface
sage: s = similarity_surfaces.example()
sage: gs = GraphicalSurface(s)
sage: gs.polygon_options["color"]="red"
sage: gs.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 13 graphics primitives
"""
def __init__(self, similarity_surface, adjacencies=None, polygon_labels=True, \
edge_labels="gluings", default_position_function = None):
r"""
Construct a GraphicalSurface from a similarity surface.
INPUT:
- ``similarity_surface`` -- a similarity surface
- ``polygon_labels`` -- a boolean (default ``True``) whether the label
of polygons are displayed
- ``edge_labels`` -- option to control the display of edge labels. It
can be one of
- ``False`` or ``None`` for no labels
- ``'gluings'`` -- to put on each side of each non-adjacent edge, the
name of the polygon to which it is glued
- ``'number'`` -- to put on each side of each edge the number of the
edge
- ``'gluings and number'`` -- full information
- ``'letter'`` -- add matching letters to glued edges in an arbitrary way
- ``adjacencies`` -- a list of pairs ``(p,e)`` to be used to set
adjacencies of polygons.
- ``default_position_function`` -- a function mapping polygon labels to
similarities describing the position of the corresponding polygon.
If adjacencies is not defined and the surface is finite, make_all_visible()
is called to make all polygons visible.
"""
assert isinstance(similarity_surface, SimilaritySurface)
self._ss = similarity_surface
self._default_position_function = default_position_function
self._polygons = {}
self._visible = set([self._ss.base_label()])
if adjacencies is None:
if self._ss.is_finite():
self.make_all_visible()
self._edge_labels = None
self.will_plot_polygons = True
r"""
Whether to plot polygons which are right-side up.
"""
self.polygon_options = {"color":"lightgray"}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_polygon` when plotting a polygon right-side up."""
self.will_plot_upside_down_polygons = False
r"""
Whether to plot polygons which are upside down
"""
self.upside_down_polygon_options = {"color":"lightgray", "zorder":-1}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_polygon` when plotting a polygon upside-down."""
self.will_plot_polygon_labels = True
r"""
Whether to plot polygon labels.
"""
self.polygon_label_options = {"color":"black", "vertical_alignment":"center", "horizontal_alignment":"center"}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_label` when plotting a polygon label."""
self.will_plot_edges = True
r"""
If this is False then no edges will be plotted.
"""
self.will_plot_non_adjacent_edges = True
r"""
Whether to plot polygon edges which are not adjacent to the edge it is glued to.
"""
self.non_adjacent_edge_options = {"color":"blue"}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_edge` when plotting a polygon edge
which is not adjacent to the edge it is glued to."""
self.will_plot_adjacent_edges = True
r"""
Whether to plot polygon edges which are adjacent to the polygon they are glued to.
"""
self.adjacent_edge_options = {"color":"blue", "linestyle":":"}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_edge`
when plotting a polygon edge which is adjacent to the edge it is glued to."""
self.will_plot_self_glued_edges = True
r"""
Whether to plot polygon edges which are glued to themselves.
"""
self.self_glued_edge_options = {"color":"red"}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_edge` when plotting a polygon edge
which is glued to itself."""
self.will_plot_edge_labels = True
r"""
Whether to plot edge labels.
"""
self.edge_label_options = {"color":"blue"}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.edge_label` when plotting a polygon label."""
self.will_plot_zero_flags = False
r"""
Whether to plot line segments from the barycenter to the zero vertex of each polygon.
"""
self.zero_flag_options = {"color":"green", "thickness":0.5}
r"""Options passed to :func:`graphical_polygon.GraphicalPolygon.plot_zero_flag` when plotting a zero_flag."""
self.process_options(adjacencies=adjacencies,
polygon_labels=polygon_labels, edge_labels=edge_labels)
def process_options(self, adjacencies=None, polygon_labels=None, edge_labels=None, default_position_function = None):
r"""
Process the options listed as if the graphical_surface was first
created.
INPUT:
- ``adjacencies`` -- a list of pairs ``(p,e)`` to be used to set
adjacencies of polygons.
- ``polygon_labels`` -- a boolean (default ``True``) whether the label
of polygons are displayed
- ``edge_labels`` -- option to control the display of edge labels. It
can be one of
- ``None`` for no change
- ``False`` for no labels
- ``'gluings'`` -- to put on each side of each non-adjacent edge, the
name of the polygon to which it is glued
- ``'number'`` -- to put on each side of each edge the number of the
edge
- ``'gluings and number'`` -- full information
- ``'letter'`` -- add matching letters to glued edges in an arbitrary way
- ``default_position_function`` -- a function mapping polygon labels to
similarities describing the position of the corresponding polygon.
Note that this will not affect polygons which have already been
positioned.
TESTS::
sage: from flatsurf import *
sage: c = translation_surfaces.chamanara(1/2)
sage: gs = c.graphical_surface()
sage: gs.process_options(edge_labels='hey')
Traceback (most recent call last):
...
ValueError: invalid value for edge_labels (='hey')
"""
if adjacencies is not None:
for p,e in adjacencies:
self.make_adjacent(p, e)
if polygon_labels is not None:
if not isinstance(polygon_labels, bool):
raise ValueError("polygon_labels must be True, False or None.")
self.will_plot_polygon_labels = polygon_labels
if edge_labels is not None:
if edge_labels is True:
self.will_plot_edge_labels = True
edge_labels = 'gluings'
elif edge_labels is False:
self.will_plot_edge_labels = False
edge_labels = None
elif edge_labels in ['gluings', 'number', 'gluings and number', 'letter']:
self._edge_labels = edge_labels
self.will_plot_edge_labels = True
else:
raise ValueError("invalid value for edge_labels (={!r})".format(edge_labels))
if default_position_function is not None:
self._default_position_function = default_position_function
def copy(self):
r"""
Make a copy of this GraphicalSurface.
EXAMPLES::
sage: from flatsurf import *
sage: s = translation_surfaces.octagon_and_squares()
sage: gs = s.graphical_surface()
sage: gs.will_plot_zero_flags = True
sage: gs.graphical_polygon(1).transformation()
(x, y) |-> (x + 2, y)
sage: gs.make_adjacent(0,2)
sage: gs.graphical_polygon(1).transformation()
(x, y) |-> (x + (a + 4), y + (a + 2))
sage: gs.polygon_options["color"]="yellow"
sage: gs2 = gs.copy()
sage: gs2 == gs
False
sage: gs2.will_plot_zero_flags
True
sage: gs2.graphical_polygon(1).transformation()
(x, y) |-> (x + (a + 4), y + (a + 2))
sage: gs2.polygon_options
{'color': 'yellow'}
"""
gs = GraphicalSurface(self.get_surface(), default_position_function = self._default_position_function)
# Copy plot options
gs.will_plot_polygons = self.will_plot_polygons
gs.polygon_options = dict(self.polygon_options)
gs.will_plot_upside_down_polygons = self.will_plot_upside_down_polygons
gs.upside_down_polygon_options = dict(self.upside_down_polygon_options)
gs.will_plot_polygon_labels = self.will_plot_polygon_labels
gs.polygon_label_options = dict(self.polygon_label_options)
gs.will_plot_edges = self.will_plot_edges
gs.will_plot_non_adjacent_edges = self.will_plot_non_adjacent_edges
gs.non_adjacent_edge_options = dict(self.non_adjacent_edge_options)
gs.will_plot_adjacent_edges = self.will_plot_adjacent_edges
gs.adjacent_edge_options = dict(self.adjacent_edge_options)
gs.will_plot_self_glued_edges = self.will_plot_self_glued_edges
gs.self_glued_edge_options = dict(self.self_glued_edge_options)
gs.will_plot_edge_labels = self.will_plot_edge_labels
gs.edge_label_options = dict(self.edge_label_options)
gs.will_plot_zero_flags = self.will_plot_zero_flags
gs.zero_flag_options = dict(self.zero_flag_options)
# Copy polygons and visible set.
gs._polygons = {label:gp.copy() for label,gp in iteritems(self._polygons)}
gs._visible = set(self._visible)
gs._edge_labels = self._edge_labels
return gs
def __repr__(self):
return "Graphical version of Similarity Surface {!r}".format(self._ss)
def visible(self):
r"""
Return a copy of the set of visible labels.
"""
return set(self._visible)
def is_visible(self,label):
r"""
Return whether the polygon with the given label is marked as visible.
"""
return label in self._visible
def make_visible(self, label):
r"""
Mark the polygon with the given label as visible.
"""
self._visible.add(label)
def hide(self, label):
r"""
Mark the polygon with the given label as invisible.
"""
self._visible.remove(label)
def make_all_visible(self, adjacent=None, limit=None):
r"""
Attempt to show all invisible polygons by walking over the surface.
INPUT:
- ``adjacent`` -- (default ``None``) whether the newly added polygons are
set to be adjacent or not. Defaults to ``True`` unless a default_position_function was
provided.
- ``limit`` -- (default ``None``) maximal number of additional polygons to make visible
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: g = s.graphical_surface()
sage: g.make_all_visible()
sage: g.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 13 graphics primitives
sage: s = similarity_surfaces.example()
sage: g = s.graphical_surface(cached=False, adjacencies=[])
sage: g.make_all_visible(adjacent=False)
sage: g.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 16 graphics primitives
"""
if adjacent is None:
adjacent = (self._default_position_function is None)
if limit is None:
assert self._ss.is_finite()
if adjacent:
for l,poly in self._ss.walker().label_polygon_iterator():
for e in range(poly.num_edges()):
l2,e2 = self._ss.opposite_edge(l,e)
if not self.is_visible(l2):
self.make_adjacent(l,e)
else:
from flatsurf.geometry.similarity import SimilarityGroup
T = SimilarityGroup(self._ss.base_ring())
for l in self._ss.label_iterator():
if not self.is_visible(l):
if self._default_position_function is None:
# No reasonable way to display the polygon, so we do this hack:
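# Translate the polygon so that it sits one unit to the right of
# everything plotted so far, vertically centered on the x-axis.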
g = self.graphical_polygon(l)
poly = self._ss.polygon(l)
sxmax = self.xmax()
pxmin = g.xmin()
t = T((QQ(self.xmax() - g.xmin() + 1),
QQ(-(g.ymin()+g.ymax()) / ZZ(2) )))
g.set_transformation(t)
self.make_visible(l)
else:
assert limit>0
if adjacent:
i = 0
for l,poly in self._ss.walker().label_polygon_iterator():
for e in range(poly.num_edges()):
l2,e2 = self._ss.opposite_edge(l,e)
if not self.is_visible(l2):
self.make_adjacent(l,e)
i=i+1
if i>=limit:
return
else:
from flatsurf.geometry.similarity import SimilarityGroup
T = SimilarityGroup(self._ss.base_ring())
i = 0
for l in self._ss.label_iterator():
if not self.is_visible(l):
if self._default_position_function is None:
# No reasonable way to display the polygon, so we do this hack:
g = self.graphical_polygon(l)
poly = self._ss.polygon(l)
sxmax = self.xmax()
pxmin = g.xmin()
t = T((QQ(self.xmax() - g.xmin() + 1),
QQ(-(g.ymin()+g.ymax()) / ZZ(2) )))
g.set_transformation(t)
self.make_visible(l)
i=i+1
if i>=limit:
return
def get_surface(self):
r"""
Return the underlying similarity surface.
"""
return self._ss
def xmin(self):
r"""
Return the minimal x-coordinate of a vertex of a visible graphical polygon.
.. TODO::
this should be xmin
"""
return min([self.graphical_polygon(label).xmin() for label in self.visible()])
def ymin(self):
r"""
Return the minimal y-coordinate of a vertex of a visible graphical polygon.
"""
return min([self.graphical_polygon(label).ymin() for label in self.visible()])
def xmax(self):
r"""
Return the maximal x-coordinate of a vertex of a visible graphical polygon.
"""
return max([self.graphical_polygon(label).xmax() for label in self.visible()])
def ymax(self):
r"""
Return the maximal y-coordinate of a vertex of a visible graphical polygon.
"""
return max([self.graphical_polygon(label).ymax() for label in self.visible()])
def bounding_box(self):
r"""
Return the quadruple (x1,y1,x2,y2) where x1 and y1 are the minimal
x- and y-coordinates of a visible graphical polygon and x2 and y2 are the
maximal x- and y-coordinates of a visible graphical polygon.
"""
return self.xmin(), self.ymin(), self.xmax(), self.ymax()
def graphical_polygon(self, label):
r"""
Return the graphical_polygon with the given label.
"""
if label in self._polygons:
return self._polygons[label]
else:
t = None
if not self._default_position_function is None:
t=self._default_position_function(label)
p = GraphicalPolygon(self._ss.polygon(label), transformation=t)
self._polygons[label] = p
return p
def make_adjacent(self, p, e, reverse = False, visible = True):
r"""
Move the polygon across the prescribed edge so that it is adjacent.
The polygon moved is obtained from opposite_edge(p,e).
If reverse=True then the polygon is moved so that there is a fold
at the edge.
If visible is True (by default), we also make the moved polygon visible.
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: gs = s.graphical_surface(adjacencies=[])
sage: gs.graphical_polygon(0)
GraphicalPolygon with vertices [(0.0, 0.0), (2.0, -2.0), (2.0, 0.0)]
sage: gs.graphical_polygon(1)
GraphicalPolygon with vertices [(0.0, 0.0), (2.0, 0.0), (1.0, 3.0)]
sage: print("Polygon 0, edge 0 is opposite "+str(gs.opposite_edge(0,0)))
Polygon 0, edge 0 is opposite (1, 1)
sage: gs.make_adjacent(0,0)
sage: gs.graphical_polygon(0)
GraphicalPolygon with vertices [(0.0, 0.0), (2.0, -2.0), (2.0, 0.0)]
sage: gs.graphical_polygon(1)
GraphicalPolygon with vertices [(0.4, -2.8), (2.0, -2.0), (0.0, 0.0)]
"""
pp,ee = self._ss.opposite_edge(p,e)
if reverse:
from flatsurf.geometry.similarity import SimilarityGroup
G = SimilarityGroup(self._ss.base_ring())
q = self._ss.polygon(p)
a = q.vertex(e)
b = q.vertex(e+1)
# This is the similarity carrying the origin to a and (1,0) to b:
g = G(b[0]-a[0],b[1]-a[1],a[0],a[1])
qq = self._ss.polygon(pp)
aa = qq.vertex(ee+1)
bb = qq.vertex(ee)
# This is the similarity carrying the origin to aa and (1,0) to bb:
gg = G(bb[0]-aa[0],bb[1]-aa[1],aa[0],aa[1])
reflection = G(
self._ss.base_ring().one(),
self._ss.base_ring().zero(),
self._ss.base_ring().zero(),
self._ss.base_ring().zero(),
-1)
# This is the similarity carrying (a,b) to (aa,bb):
g = gg*reflection*(~g)
else:
g = self._ss.edge_transformation(pp,ee)
h = self.graphical_polygon(p).transformation()
self.graphical_polygon(pp).set_transformation(h*g)
if visible:
self.make_visible(pp)
def make_adjacent_and_visible(self, p, e, reverse=False):
r"""
Move the polygon across the prescribed edge so that it is adjacent,
and make the moved polygon visible.
"""
from sage.misc.superseded import deprecation
deprecation(42, "Do not use .make_adjacent_and_visible(). Use .make_adjacent() instead.")
self.make_adjacent(p, e, reverse=reverse)
def is_adjacent(self,p,e):
r"""
Returns the truth value of the statement
'The polygon opposite edge (p,e) is adjacent to that edge.'
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: g = s.graphical_surface(adjacencies=[])
sage: g.is_adjacent(0,0)
False
sage: g.is_adjacent(0,1)
False
sage: g.make_all_visible(adjacent=True)
sage: g.is_adjacent(0,0)
True
sage: g.is_adjacent(0,1)
False
"""
pp,ee = self.opposite_edge(p,e)
if not self.is_visible(pp):
return False
g = self.graphical_polygon(p)
gg = self.graphical_polygon(pp)
return g.transformed_vertex(e) == gg.transformed_vertex(ee+1) and \
g.transformed_vertex(e+1) == gg.transformed_vertex(ee)
def to_surface(self, point, v=None, label=None, ring=None, return_all=False, \
singularity_limit=None, search_all = False, search_limit=None):
r""" Converts from graphical coordinates to similarity surface coordinates.
A point always must be provided. If a vector v is provided then a
SimilaritySurfaceTangentVector will be returned. If v is not provided, then a
SurfacePoint is returned.
INPUT:
- ``point`` -- Coordinates of a point in graphical coordinates to be
converted to surface coordinates.
- ``v`` -- (default ``None``) If provided a tangent vector in graphical
coordinates based at the provided point.
- ``label`` -- (default ``None``) If provided, then we only convert
points and tangent vectors in the corresponding graphical polygon.
- ``ring`` -- (default ``None``) If provided, then objects returned
will be defined over the given ring, otherwise we use the base_ring
of the surface.
- ``return_all`` -- (default ``False``) By default we return the first
point or vector we find. However if the graphical polygons overlap,
then a point or vector might correspond to more than one point or
vector on the surface. If ``return_all`` is set to ``True`` then we
return a set of all points we find instead.
- ``singularity_limit`` -- (default ``None``) This only has an effect
if returning a singular point (i.e., ``v`` is ``None``) and the
surface is infinite. In this case, the singularity should be
returned but it could be infinite. Then singularity_limit controls
how far we look for the singularity to close. This value is passed
to ``SimilaritySurface.surface_point``.
- ``search_all`` -- (default ``False``) By default we look just in
polygons with visible labels. If set to ``True``, then we instead
look in all labels.
- ``search_limit`` -- (default ``None``) If ``search_all`` is ``True``,
then we look at the first ``search_limit`` polygons instead of all
polygons. This must be set to a positive integer if ``search_all``
is ``True`` and the surface is infinite.
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: gs = s.graphical_surface()
sage: gs.to_surface((1,-2))
Surface point located at (1, 1/2) in polygon 1
sage: gs.to_surface((1,-2), v=(1,0))
SimilaritySurfaceTangentVector in polygon 1 based at (1, 1/2) with vector (1, -1/2)
sage: s = translation_surfaces.infinite_staircase()
sage: gs = s.graphical_surface()
sage: gs.to_surface((4,4), (1,1), search_all=True, search_limit=20)
SimilaritySurfaceTangentVector in polygon 8 based at (0, 0) with vector (1, 1)
sage: s = translation_surfaces.square_torus()
sage: pc = s.minimal_cover(cover_type="planar")
sage: gs = pc.graphical_surface()
sage: gs.to_surface((3,2), search_all=True, search_limit=20)
Traceback (most recent call last):
...
ValueError: To obtain a singularity on an infinite surface, singularity_limit must be set.
sage: gs.to_surface((3,2), search_all=True, search_limit=20, singularity_limit=4)
Surface point with 4 coordinate representations
sage: p = gs.to_surface((sqrt(3),sqrt(2)), ring=AA, search_all=True, search_limit=20)
sage: next(iter(p.coordinates(p.labels()[0]))).parent()
Vector space of dimension 2 over Algebraic Real Field
sage: v = gs.to_surface((3/2,3/2),(sqrt(3),sqrt(2)),ring=AA,search_all=True, search_limit=20)
sage: v.bundle()
Tangent bundle of TranslationSurface built from infinitely many polygons defined over Algebraic Real Field
"""
if label is None:
if return_all:
ret = set()
s = self.get_surface()
if search_all:
if search_limit is None:
if s.is_finite():
it = s.label_iterator()
else:
raise ValueError("If search_all=True and the surface is infinite, then a search_limit must be provided.")
else:
from itertools import islice
it = islice(s.label_iterator(), search_limit)
else:
it = self.visible()
for label in it:
try:
val = self.to_surface(point, v=v, label=label, ring=ring, singularity_limit=singularity_limit)
if return_all:
ret.add(val)
else:
return val
except AssertionError:
# Not in the polygon
pass
except ValueError as e:
if e.args[0] == 'need a limit when working with an infinite surface':
raise ValueError("To obtain a singularity on an infinite surface, " + \
"singularity_limit must be set.")
# Otherwise it is not in the polygon.
if return_all:
return ret
else:
raise ValueError("Point or vector is not in a visible graphical_polygon.")
else:
gp = self.graphical_polygon(label)
coords = gp.transform_back(point)
s = self.get_surface()
if v is None:
return s.surface_point(label, coords, ring=ring, limit=singularity_limit)
else:
return s.tangent_vector(label, coords, (~(gp.transformation().derivative()))*vector(v), ring=ring)
def opposite_edge(self, p, e):
r"""
Given the label ``p`` of a polygon and an edge ``e`` in that polygon
returns the pair (``pp``, ``ee``) to which this edge is glued.
"""
return self._ss.opposite_edge(p,e)
def reset_letters(self,p,e):
r"""
Resets the letter dictionary for storing letters used in
edge labeling if edge_labels="letter" is used.
"""
try:
del self._letters
del self._next_letter
except:
pass
def _get_letter_for_edge(self, p, e):
if not hasattr(self,"_letters"):
self._letters={}
self._next_letter=1
try:
return self._letters[(p,e)]
except KeyError:
# Convert the counter to a label over a bijective base-52 alphabet
# (1 -> 'a', ..., 26 -> 'z', 27 -> 'A', ..., 52 -> 'Z', 53 -> 'aa', ...)
nl = self._next_letter
self._next_letter = nl + 1
letter = ""
while nl!=0:
val = nl % 52
if val==0:
val=52
letter = "Z" + letter
elif val<27:
letter = chr(97+val-1) + letter
else:
letter = chr(65+val-27) + letter
nl = (nl-val)/52
self._letters[(p,e)] = letter
self._letters[self._ss.opposite_edge(p,e)] = letter
return letter
def edge_labels(self, lab):
r"""
Return the list of edge labels to be used for the polygon with label ``lab``.
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: g = s.graphical_surface(cached=False, adjacencies=[])
sage: g.edge_labels(0)
['1', '1', '1']
sage: g.make_all_visible(adjacent=True)
sage: g.edge_labels(0)
[None, '1', '1']
sage: g.make_adjacent(0,0)
sage: g.edge_labels(0)
[None, '1', '1']
sage: g.edge_labels(1)
['0', None, '0']
sage: s = similarity_surfaces.example()
sage: g = s.graphical_surface(cached=False, adjacencies=[], edge_labels='number')
sage: g.edge_labels(0)
['0', '1', '2']
sage: g = s.graphical_surface(cached=False, adjacencies=[], edge_labels='gluings and number')
sage: g.edge_labels(0)
['0 -> (1, 1)', '1 -> (1, 2)', '2 -> (1, 0)']
sage: g.make_all_visible(adjacent=True)
sage: g.edge_labels(0)
['0', '1 -> (1, 2)', '2 -> (1, 0)']
"""
if not self._edge_labels:
return None
s = self._ss
g = self.graphical_polygon(lab)
p = g.base_polygon()
if self._edge_labels == 'gluings':
ans = []
for e in range(p.num_edges()):
if self.is_adjacent(lab, e):
ans.append(None)
else:
llab,ee = s.opposite_edge(lab,e)
ans.append(str(llab))
elif self._edge_labels == 'number':
ans = list(map(str, range(p.num_edges())))
elif self._edge_labels == 'gluings and number':
ans = []
for e in range(p.num_edges()):
if self.is_adjacent(lab, e):
ans.append(str(e))
else:
ans.append("{} -> {}".format(e, s.opposite_edge(lab,e)))
elif self._edge_labels == "letter":
ans = []
for e in range(p.num_edges()):
llab,ee = s.opposite_edge(lab,e)
if not self.is_visible(llab) or self.is_adjacent(lab, e):
ans.append(None)
else:
ans.append(self._get_letter_for_edge(lab,e))
else:
raise RuntimeError("invalid option for edge_labels")
return ans
def plot_polygon(self, label, graphical_polygon, upside_down):
r"""
Internal method for plotting polygons returning a Graphics object.
Calls :func:`graphical_polygon.GraphicalPolygon.plot_polygon` passing
the attribute `upside_down_polygon_options` if the polygon is upside down
and `polygon_options` otherwise.
Override this method for fine control of how the polygons are drawn.
INPUT:
- ``label`` -- The label of the polygon being plotted.
- ``graphical_polygon`` -- The associated graphical polygon.
- ``upside_down`` -- True if and only if the polygon will be rendered upside down.
"""
if upside_down:
return graphical_polygon.plot_polygon(**self.upside_down_polygon_options)
else:
return graphical_polygon.plot_polygon(**self.polygon_options)
def plot_polygon_label(self, label, graphical_polygon, upside_down):
r"""
Internal method for plotting polygon labels returning a Graphics2D.
Calls :func:`graphical_polygon.GraphicalPolygon.plot_polygon_label` passing
the attribute `polygon_label_options`.
Override this method for fine control of how the polygons are drawn.
INPUT:
- ``label`` -- The label of the polygon being plotted.
- ``graphical_polygon`` -- The associated graphical polygon.
- ``upside_down`` -- True if and only if the polygon will be rendered upside down.
"""
return graphical_polygon.plot_label(label,**self.polygon_label_options)
def plot_edge(self, label, edge, graphical_polygon, is_adjacent, is_self_glued):
r"""
Internal method for plotting a polygon's edge returning a Graphics2D.
The method calls :func:`graphical_polygon.GraphicalPolygon.plot_edge`.
Depending on the geometry of the edge pair, it passes one of the attributes
`adjacent_edge_options`, `self_glued_edge_options` or `non_adjacent_edge_options`.
Override this method for fine control of how the edge is drawn.
INPUT:
- ``label`` -- The label of the polygon.
- ``edge`` -- Integer representing the edge of the polygon.
- ``graphical_polygon`` -- The associated graphical polygon.
- ``is_adjacent`` -- True if and only if the polygon opposite this edge is visible and adjacent to this edge.
In this case, plot_edge is called only once for this edge.
- ``is_self_glued`` -- True if and only if the edge is glued to itself by a 180 degree rotation.
This is never True if is_adjacent is True.
"""
if is_adjacent:
return graphical_polygon.plot_edge(edge, **self.adjacent_edge_options)
elif is_self_glued:
return graphical_polygon.plot_edge(edge, **self.self_glued_edge_options)
else:
return graphical_polygon.plot_edge(edge, **self.non_adjacent_edge_options)
def plot_edge_label(self, p, e, edge_label, graphical_polygon):
r"""
Internal method for plotting an edge label.
Calls :func:`graphical_polygon.GraphicalPolygon.plot_edge_label` passing
the attribute `edge_label_options`.
        Override this method for fine control of how the edge label is drawn.
INPUT:
- ``p`` -- The label of the polygon.
- ``e`` -- Integer representing the edge of the polygon.
- ``edge_label`` -- A string containing the label to be printed on the edge.
- ``graphical_polygon`` -- The associated graphical polygon.
"""
return graphical_polygon.plot_edge_label(e, edge_label, **self.edge_label_options)
def plot_zero_flag(self, label, graphical_polygon):
r"""
Internal method for plotting a polygon's zero_flag and returning a Graphics2D.
Simply calls :func:`graphical_polygon.GraphicalPolygon.plot_zero_flag` passing
the attribute `zero_flag_options`.
        Override this method for fine control of how the zero flag is drawn.
INPUT:
- ``label`` -- The label of the polygon.
- ``graphical_polygon`` -- The associated graphical polygon.
"""
return graphical_polygon.plot_zero_flag(**self.zero_flag_options)
def plot(self):
r"""
Returns a plot of the GraphicalSurface
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: from flatsurf.graphical.surface import GraphicalSurface
sage: gs = GraphicalSurface(s)
sage: gs.make_visible(1)
sage: gs.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 13 graphics primitives
Check that label options are handled correctly::
sage: S = translation_surfaces.square_torus()
sage: S.plot(polygon_labels=True, edge_labels=True) # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 10 graphics primitives
sage: S.plot(polygon_labels=False, edge_labels=True) # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 9 graphics primitives
sage: S.plot(polygon_labels=True, edge_labels=False) # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 6 graphics primitives
sage: S.plot(polygon_labels=False, edge_labels=False) # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 5 graphics primitives
"""
from sage.plot.graphics import Graphics
p = Graphics()
# Make sure we don't plot adjacent edges more than once.
plotted_adjacent_edges = set()
for label in self._visible:
polygon = self.graphical_polygon(label)
upside_down = polygon.transformation().sign()==-1
# Plot the polygons
if upside_down and self.will_plot_upside_down_polygons:
p += self.plot_polygon(label, polygon, upside_down)
elif self.will_plot_polygons:
p += self.plot_polygon(label, polygon, upside_down)
if self.will_plot_zero_flags:
p += self.plot_zero_flag(label,polygon)
# Add the polygon label
if self.will_plot_polygon_labels:
p += self.plot_polygon_label(label, polygon, upside_down)
# Plot the edges
if self.will_plot_edges:
for i in range(self._ss.polygon(label).num_edges()):
if self.is_adjacent(label,i):
if self.will_plot_adjacent_edges and (label,i) not in plotted_adjacent_edges:
plotted_adjacent_edges.add(self._ss.opposite_edge(label,i))
p += self.plot_edge(label, i, polygon, True, False)
elif (label,i) == self._ss.opposite_edge(label,i):
# Self-glued edge
if self.will_plot_self_glued_edges:
p += self.plot_edge(label, i, polygon, False, True)
else:
if self.will_plot_non_adjacent_edges:
p += self.plot_edge(label, i, polygon, False, False)
# Plot the edge labels.
if self.will_plot_edge_labels:
# get the edge labels
edge_labels = self.edge_labels(label)
if edge_labels is not None:
for i in range(self._ss.polygon(label).num_edges()):
if edge_labels[i] is not None:
p += self.plot_edge_label(label, i, edge_labels[i], polygon)
return p
| gpl-2.0 |
gsmaxwell/phase_offset_rx | gr-digital/examples/example_costas.py | 17 | 4430 | #!/usr/bin/env python
from gnuradio import gr, digital
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = gr.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = gr.vector_source_c(data.tolist(), False)
self.rrc = gr.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = gr.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_cst = gr.vector_sink_c()
self.vsnk_frq = gr.vector_sink_f()
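        # Signal chain set up by the connections below: the +/-1 symbol source is
        # pulse-shaped by the interpolating RRC filter, passed through the channel
        # model (noise, frequency and timing offset) and tracked by the Costas
        # loop; the vector sinks capture the shaped signal before the channel, the
        # Costas loop output, and the loop's frequency estimate for plotting.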
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
    # Convert the Costas loop's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
DimensionalScoop/kautschuk | AP_SS16/US3/PythonSkript.py | 1 | 14222 | ##################################################### Import system libraries ######################################################
import matplotlib as mpl
mpl.rcdefaults()
mpl.rcParams.update(mpl.rc_params_from_file('meine-matplotlibrc'))
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import (
nominal_values as noms,
std_devs as stds,
)
################################################ Finish importing system libraries #################################################
################################################ Adding subfolder to system's path #################################################
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"python_custom_scripts")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
############################################# Finish adding subfolder to system's path #############################################
##################################################### Import custom libraries ######################################################
from curve_fit import ucurve_fit
from table import (
make_table,
make_full_table,
make_composed_table,
make_SI,
write,
)
from regression import (
reg_linear,
reg_quadratic,
reg_cubic
)
from error_calculation import(
MeanError
)
################################################ Finish importing custom libraries #################################################
### Given data
rho_L = 1.15 # in g/cm^3
c_L = 1800 # m/s
eta = 12 # mPa s
c_P = 2700 # m/s
l = 30.7 # mm
di_k = 7e-3 # m
di_m = 10e-3 # m
di_g = 16e-3 # m
da_k = 10e-3 # m
da_m = 15e-3 # m
da_g = 20e-3 # m
nu_0 = 2e6 # Hz
import scipy.stats
# def mean(values):
# return ufloat(np.mean(noms(values)), scipy.stats.sem(noms(values)))
def mean(values, axis=0):
return unp.uarray((np.mean(noms(values), axis=axis), scipy.stats.sem(noms(values), axis=axis)))
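# mean() pairs the column-wise arithmetic mean with the standard error of the
# mean as a uarray; do_job_a below uses it as mean([v[0], v[1], v[2]], 0) to
# combine the velocities obtained from the three Doppler angles.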
Theta = np.array([30, 15, 60])*np.pi/180 # rad
alpha = np.pi/2 - np.arcsin(np.sin(Theta) * c_L/c_P) # rad
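# The Doppler angle alpha follows from the prism angle Theta via Snell's law:
# the beam refracts from the prism (sound speed c_P) into the liquid (c_L), so
# the refracted angle is arcsin(sin(Theta)*c_L/c_P), and alpha = pi/2 minus that
# angle is what enters the Doppler formula below via cos(alpha).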
# reusable function to carry out the tasks for the different inner diameters
def do_job_a(filename, error, j, filename_out = None):
    # Read in the measured data
P, Delta_f_30, Delta_f_15, Delta_f_60 = np.genfromtxt(filename, unpack=True)
#
di = [7,10,16]
colors = ['rx', 'bx', 'gx']
Delta_f_30_error = Delta_f_30*error
Delta_f_30 = unp.uarray(Delta_f_30, Delta_f_30_error)
Delta_f_15_error = Delta_f_15*error
Delta_f_15 = unp.uarray(Delta_f_15, Delta_f_15_error)
Delta_f_60_error = Delta_f_60*error
Delta_f_60 = unp.uarray(Delta_f_60, Delta_f_60_error)
v= unp.uarray(np.zeros(3), np.zeros(3))
v[0] = c_L / 2 / nu_0 * Delta_f_30 / np.cos(alpha[0])
v[1] = c_L / 2 / nu_0 * Delta_f_15 / np.cos(alpha[1])
v[2] = c_L / 2 / nu_0 * Delta_f_60 / np.cos(alpha[2])
v_mean = mean([v[0], v[1], v[2]], 0)
# TABLES
write('build/Tabelle_a_'+str(di[j])+'.tex',
make_table(
[P,Delta_f_30,Delta_f_15,Delta_f_60,v[0],v[1],v[2], v_mean],
[0, 1, 1, 1, 1, 1, 1, 1]))
write('build/Tabelle_a_'+str(di[j])+'_texformat.tex', make_full_table(
        r'Messdaten und daraus errechnete Geschwindigkeit für $d_i = $'+str(di[j])+r'$\si{\milli\meter}$.',
'table:A'+str(j),
'build/Tabelle_a_'+str(di[j])+'.tex',
[1,2,3,4,5,6,7],
[r'$\frac{P}{P_\text{max}} \:/\: \si{\percent}$',
r'$\Delta f_{30°} \:/\: \si{\hertz}$',
r'$\Delta f_{15°} \:/\: \si{\hertz}$',
r'$\Delta f_{60°} \:/\: \si{\hertz}$',
r'$v_{30°} \:/\: \si{\meter\per\second}$',
r'$v_{15°} \:/\: \si{\meter\per\second}$',
r'$v_{60°} \:/\: \si{\meter\per\second}$',
r'$\overline{v} \:/\: \si{\meter\per\second}$']))
# Plotting
plt.figure(1)
y = Delta_f_30 / np.cos(alpha[0])
plt.errorbar(noms(v[0]), noms(y), fmt=colors[j], xerr = stds(v[0]), yerr=stds(y), label=r'$d_i = ' + str(di[j]) + r'\si{\milli\meter}$')
plt.figure(2)
y = Delta_f_15 / np.cos(alpha[1])
plt.errorbar(noms(v[1]), noms(y), fmt=colors[j], xerr = stds(v[1]), yerr=stds(y), label=r'$d_i = ' + str(di[j]) + r'\si{\milli\meter}$')
plt.figure(3)
y = Delta_f_60 / np.cos(alpha[2])
plt.errorbar(noms(v[2]), noms(y), fmt=colors[j], xerr = stds(v[2]), yerr=stds(y), label=r'$d_i = ' + str(di[j]) + r'\si{\milli\meter}$')
i = 1
if (filename_out):
for name in filename_out:
plt.figure(i)
plt.xlabel(r'$v \:/\: \si{\meter\per\second}$')
plt.ylabel(r'$\Delta\nu / \cos{\alpha} \:/\: \si{\kilo\volt}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig(name)
i += 1
def do_job_b(filename, error, P, limits):
    # Read in the measured data
Tiefe, Delta_f, Intensity = np.genfromtxt(filename, unpack=True)
colors = ['rx', 'bx', 'gx']
Delta_f_error = Delta_f*error
Delta_f = unp.uarray(Delta_f, Delta_f_error)
    v = c_L / 2 / nu_0 * Delta_f / np.cos(alpha[1]) # 15° angle
    ###### Fit over the interval `limits` with a quadratic function according to the Hagen-Poiseuille law
i = 0
start = 0
end = 0
for x in Tiefe:
if (x == limits[0]):
start = i
if (x == limits[1]):
end = i
i += 1
    params = ucurve_fit(reg_quadratic, Tiefe[start:(end+1)], v[start:(end+1)]) # quadratic fit
a, x0, c = params
write('build/parameter_a.tex', make_SI(a * 1e-3, r'\kilo\volt', figures=1))
    ##### End of fit ########
# Plotting
plt.clf
fig, ax1 = plt.subplots()
t_plot = np.linspace(limits[0]-0.5, limits[1]+0.5, 50)
    # Instantaneous velocities
Ins1 = ax1.plot(Tiefe, noms(v), 'rx', label='Momentangeschwindigkeit')
Ins2 = ax1.plot(t_plot, reg_quadratic(t_plot, *noms(params)), 'r--', label='Fit')
ax1.set_xlabel(r'$\text{Laufzeit} \:/\: \si{\micro\second}$')
ax1.set_ylabel(r'$v \:/\: \si{\meter\per\second}$')
if ( P==45 ):
ax1.set_ylim(0.45, 0.9) # hard coded stuff ftl !
    # Scattering intensities
ax2 = ax1.twinx()
Ins3 = ax2.plot(Tiefe, Intensity, 'b+', label='Intensität')
ax2.set_ylabel(r'$I \:/\: \si{\kilo\volt\squared\per\second}$')
    # Draw the theoretical tube boundaries
ax1.plot((noms(x0)-5/1.5, noms(x0)-5/1.5), (ax1.get_ylim()[0], ax1.get_ylim()[1]), 'k:', linewidth=1)
ax1.plot((noms(x0)+5/1.5, noms(x0)+5/1.5), (ax1.get_ylim()[0], ax1.get_ylim()[1]), 'k:', linewidth=1)
ax1.plot((noms(x0)-5/1.5-2.5/2.5, noms(x0)-5/1.5-2.5/2.5), (ax1.get_ylim()[0], ax1.get_ylim()[1]), 'k:', linewidth=1)
ax1.plot((noms(x0)+5/1.5+2.5/2.5, noms(x0)+5/1.5+2.5/2.5), (ax1.get_ylim()[0], ax1.get_ylim()[1]), 'k:', linewidth=1)
Ins = Ins1 + Ins2 + Ins3
labs = [l.get_label() for l in Ins]
ax1.legend(Ins, labs, loc='upper left')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/Plot_b_P'+str(P)+'.pdf')
do_job_a('messdaten/Delta_f_7mm.txt', 0.07, 0)
do_job_a('messdaten/Delta_f_10mm.txt', 0.07, 1)
do_job_a('messdaten/Delta_f_16mm.txt', 0.07, 2, ['build/Plot_a_30deg.pdf', 'build/Plot_a_15deg.pdf', 'build/Plot_a_60deg.pdf'])
do_job_b('messdaten/stroemungsprofil45.txt', 0.07, 45, [14, 17])
do_job_b('messdaten/stroemungsprofil70.txt', 0.07, 70, [14, 17])
# Table of the measured data
Tiefe, Delta_f_45, Intensity_45 = np.genfromtxt('messdaten/stroemungsprofil45.txt', unpack=True)
Tiefe, Delta_f_70, Intensity_70 = np.genfromtxt('messdaten/stroemungsprofil70.txt', unpack=True)
error = 0.07
Delta_f_45_error = Delta_f_45*error
Delta_f_45 = unp.uarray(Delta_f_45, Delta_f_45_error)
Delta_f_70_error = Delta_f_70*error
Delta_f_70 = unp.uarray(Delta_f_70, Delta_f_70_error)
Intensity_45_error = Intensity_45*error
Intensity_45 = unp.uarray(Intensity_45, Intensity_45_error)
Intensity_70_error = Intensity_70*error
Intensity_70 = unp.uarray(Intensity_70, Intensity_70_error)
write('build/Tabelle_messdaten.tex', make_table([Tiefe, Delta_f_45, Intensity_45, Delta_f_70, Intensity_70],[0, 1, 1, 1, 1]))
write('build/Tabelle_messdaten_texformat.tex', make_full_table(
'Messdaten zum Strömungsprofil.',
'table:messdaten_b',
'build/Tabelle_messdaten.tex',
    [1,2,3,4], # careful here: these numbers denote the resulting column numbers
               # that are meant to be multicolumns
[r'$\text{Laufzeit} \:/\: \si{\micro\second}$',
r'$\Delta f_{45\si{\percent}} \:/\: \si{\hertz}$',
r'$I_{45\si{\percent}} \:/\: \si{\kilo\square\volt\per\second}$',
r'$\Delta f_{70\si{\percent}} \:/\: \si{\hertz}$',
r'$I_{70\si{\percent}} \:/\: \si{\kilo\square\volt\per\second}$']))
################################ FREQUENTLY USED CODE ################################
#
########## IMPORT ##########
# t, U, U_err = np.genfromtxt('data.txt', unpack=True)
# t *= 1e-3
########## ERRORS ##########
# R_unc = ufloat(R[0],R[2])
# U = 1e3 * unp.uarray(U, U_err)
# Rx_mean = np.mean(Rx) # mean and systematic error
# Rx_mean_err = MeanError(noms(Rx)) # error of the mean
#
## Relative errors for later comparison in the discussion
# RelFehler_G = (G_mess - G_lit) / G_lit
# RelFehler_B = (B_mess - B_lit) / B_lit
# write('build/RelFehler_G.tex', make_SI(RelFehler_G*100, r'\percent', figures=1))
# write('build/RelFehler_B.tex', make_SI(RelFehler_B*100, r'\percent', figures=1))
########## CURVE FIT ##########
# def f(t, a, b, c, d):
# return a * np.sin(b * t + c) + d
#
# params = ucurve_fit(f, t, U, p0=[1, 1e3, 0, 0]) # p0 gives the initial guesses for the fit parameters
# params = ucurve_fit(reg_linear, x, y) # linear fit
# params = ucurve_fit(reg_quadratic, x, y) # quadratic fit
# params = ucurve_fit(reg_cubic, x, y) # cubic fit
# a, b = params
# write('build/parameter_a.tex', make_SI(a * 1e-3, r'\kilo\volt', figures=1)) # enter the number of significant figures
# write('build/parameter_b.tex', make_SI(b * 1e-3, r'\kilo\hertz', figures=2)) # enter the number of significant figures
########## PLOTTING ##########
# plt.clf # clear actual plot before generating a new one
#
## automatically choosing limits with existing array T1
# t_plot = np.linspace(np.amin(T1), np.amax(T1), 100)
# plt.xlim(t_plot[0]-1/np.size(T1)*(t_plot[-1]-t_plot[0]), t_plot[-1]+1/np.size(T1)*(t_plot[-1]-t_plot[0]))
#
## hard coded limits
# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3
#
## standard plotting
# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')
# plt.plot(t * 1e3, U * 1e3, 'rx', label='Messdaten')
## plt.errorbar(B * 1e3, noms(y) * 1e5, fmt='rx', yerr=stds(y) * 1e5, label='Messdaten') # with error bars
## plt.xscale('log') # logarithmic x-axis
# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
# plt.ylabel(r'$U \:/\: \si{\kilo\volt}$')
# plt.legend(loc='best')
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/aufgabenteil_a_plot.pdf')
########## WRITING TABLES ##########
### IF THERE IS ONLY ONE COLUMN IN A TABLE (workaround):
## a=np.array([Wert_d[0]])
## b=np.array([Rx_mean])
## c=np.array([Rx_mean_err])
## d=np.array([Lx_mean*1e3])
## e=np.array([Lx_mean_err*1e3])
#
# write('build/Tabelle_b.tex', make_table([a,b,c,d,e],[0, 1, 0, 1, 1])) # each value with an uncertainty gets two columns
# write('build/Tabelle_b_texformat.tex', make_full_table(
# 'Messdaten Kapazitätsmessbrücke.',
# 'table:A2',
# 'build/Tabelle_b.tex',
# [1,2,3,4,5], # careful here: these numbers denote the resulting column numbers
# # that are meant to be multicolumns
# ['Wert',
# r'$C_2 \:/\: \si{\nano\farad}$',
# r'$R_2 \:/\: \si{\ohm}$',
# r'$R_3 / R_4$', '$R_x \:/\: \si{\ohm}$',
# r'$C_x \:/\: \si{\nano\farad}$']))
#
## Splitting tables in case they are too long
# t1, t2 = np.array_split(t * 1e3, 2)
# U1, U2 = np.array_split(U * 1e-3, 2)
# write('build/loesung-table.tex', make_table([t1, U1, t2, U2], [3, None, 3, None])) # enter the number of decimal places
#
## Merging tables (raw data only, the number of rows must be equal)
# write('build/Tabelle_b_composed.tex', make_composed_table(['build/Tabelle_b_teil1.tex','build/Tabelle_b_teil2.tex']))
########## ARRAY FUNCTIONS ##########
# np.arange(2,10) # creates an ascending array from 2 to 10
# np.zeros(15) # creates an array of 15 zeros
# np.ones(15) # creates an array of 15 ones
#
# np.amin(array) # returns the smallest value within an array
# np.argmin(array) # returns the index of the minimum of an array
# np.amax(array) # returns the largest value within an array
# np.argmax(array) # returns the index of the maximum of an array
#
# a1,a2 = np.array_split(array, 2) # split an array into two halves
# np.size(array) # determine the number of elements of an array
########## ARRAY INDEXING ##########
# y[n - 1::n] # returns every n-th value of an array as an array
########## DIFFERENT STUFF ##########
# R = const.physical_constants["molar gas constant"] # Array of value, unit, error
| mit |
gtoonstra/airflow | airflow/hooks/dbapi_hook.py | 11 | 10932 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
conn=conn, login=login, host=host)
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if sys.version_info[0] < 3:
s = s.encode('utf-8')
self.log.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warn(
("%s connection doesn't support "
"autocommit but autocommit activated."),
getattr(self, self.conn_name_attr))
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype bool.
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded {i} into {table} rows so far".format(**locals())
)
conn.commit()
self.log.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
| apache-2.0 |
mode89/snn | izhikevich/classical_conditioning.py | 1 | 4842 | import collections
import matplotlib.pyplot as plt
import numpy
import random
random.seed(0)
T = 3600 * 10
N = 1000
Ne = int(N * 0.8)
Ni = N - Ne
M = int(N * 0.1)
D = 1
a = numpy.concatenate((
0.02 * numpy.ones(Ne),
0.1 * numpy.ones(Ni)
))
d = numpy.concatenate((
8 * numpy.ones(Ne),
2 * numpy.ones(Ni)
))
# generate post-synaptic connections
post = numpy.empty((N, M), dtype=numpy.int)
for i in range(Ne):
post[i,:] = random.sample(range(N), M)
for i in range(Ne, N):
post[i,:] = random.sample(range(Ne), M)
# find pre-synaptic connections to excitatory neurons
pre = [[] for i in range(N)]
for i in range(Ne):
for j in range(M):
pre[post[i,j]].append(i)
# generate delays
delays = [[[] for i in range(D)] for j in range(N)]
for i in range(Ne):
for j in range(M):
delays[i][int(D * random.random())].append(post[i,j])
for i in range(Ne, N):
for j in range(M):
delays[i][0].append(post[i,j])
# generate matrix of synaptic weights
s = numpy.zeros((N, N))
for i in range(Ne):
s[i, post[i,:]] = 1.0
for i in range(Ne, N):
s[i, post[i,:]] = -1.0
v = -65 * numpy.ones(N)
u = 0.2 * v
firings = collections.deque(maxlen=D)
STDP = numpy.zeros(N)
sm = 4.0
sd = numpy.zeros((N, N))
DA = 0
rew = []
ST = 100
stimulus = [[] for i in range(ST)]
for i in range(ST):
stimulus[i] = random.sample(range(N), 50)
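# 100 candidate stimuli, each a fixed random set of 50 neurons; stimulus 0 acts
# as the conditioned stimulus whose presentation is later followed by a dopamine
# reward, while the other 99 serve as unrewarded distractors.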
next_stimuli = 0
s_mean_indices = [i for i in range(N) if i not in stimulus[0]]
class data:
firings = []
stimulus = []
s0_mean = []
s_mean = []
plt.subplot(211)
scatter = plt.scatter([], [], color="black", marker=".")
plt.subplot(212)
s_mean_line, s0_mean_line = plt.plot([], [], [], [])
for sec in range(T):
data.firings = []
data.stimulus = []
for ms in range(1000):
t = sec * 1000 + ms
print(t)
# provide random input
I = 13.0 * numpy.array(
[random.uniform(-0.5, 0.5) for i in range(N)])
        # issue random stimuli
if t == next_stimuli:
print("Stimuli")
next_stimuli = t + random.randint(100, 300)
stimuli = random.randrange(0, ST)
neurons = stimulus[stimuli]
I[neurons] = 17.0
# reward stimuli with index 0
if stimuli == 0:
print("Reward")
rew.append(t + random.randrange(0, 1000))
data.stimulus.append(t)
# identify fired neurons
fired = numpy.argwhere(v >= 30)
if fired.size > 0:
v[fired] = -65.0
u[fired] = u[fired] + d[fired]
# deliver spikes to post-synaptic neurons
firings.appendleft(fired)
for time in range(len(firings)):
for fired_neuron in firings[time]:
post_neurons = delays[fired_neuron][time]
if len(post_neurons) > 0:
I[post_neurons] += s[fired_neuron, post_neurons]
# update post-synaptic potential
for i in range(2):
v += 0.5 * ((0.04 * v + 5.0) * v + 140.0 - u + I)
u += a * (0.2 * v - u)
# update eligibility trace
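        # STDP is a per-neuron spike-timing trace: it is set to 0.1 when the
        # neuron fires and decays by a factor 0.95 per ms. The loop below turns
        # it into sd, an eligibility trace per excitatory synapse: a spike
        # potentiates the neuron's incoming synapses from recently active inputs
        # (pre-before-post, +STDP) and depresses its outgoing synapses onto
        # recently active targets (post-before-pre, -1.2*STDP).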
STDP[fired] = 0.1
for fired_neuron in fired:
if fired_neuron < Ne:
post_neurons = post[fired_neuron,:]
sd[fired_neuron, post_neurons] -= 1.2 * STDP[post_neurons]
pre_neurons = pre[fired_neuron]
sd[pre_neurons, fired_neuron] += STDP[pre_neurons]
STDP *= 0.95
        # update synaptic weights
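        # Dopamine-modulated plasticity: the eligibility trace sd is folded into
        # the actual weights s only every 10 ms, scaled by (0.002 + DA), so a
        # reward that raises DA shortly after a stimulus selectively consolidates
        # the synapses tagged by recent pre/post spike pairings.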
DA *= 0.995
if t % 10 == 0:
s[0:Ne,:] = numpy.maximum(0.0, numpy.minimum(sm,
s[0:Ne,:] + (0.002 + DA) * sd[0:Ne,:]))
sd *= 0.99
# issue reward
if any([it == t for it in rew]):
print("Increase dopamine")
DA += 0.5
# save plotting data
data.firings.append(fired)
data.s_mean.append(
numpy.sum(s[s_mean_indices,0:Ne]) / ((N - 50) * M))
data.s0_mean.append(
numpy.sum(s[stimulus[0],0:Ne]) / (50 * M))
# Update plots
plt.subplot(211)
points = []
for ms in range(1000):
t = sec * 1000 + ms
for fired in data.firings[ms]:
points.append((t, fired))
# plot spikes
scatter.set_offsets(points)
plt.xlim(sec * 1000, sec * 1000 + 1000)
plt.ylim(0, N)
# plot rewarded stimulus
for st in data.stimulus:
plt.axvline(st, color="red")
# plot mean value of synaptic strength
s_mean_ax = plt.subplot(212)
s_mean_line.set_data(
range(sec * 1000 + 1000), data.s_mean)
s0_mean_line.set_data(
range(sec * 1000 + 1000), data.s0_mean)
s_mean_ax.relim()
s_mean_ax.set_xlim(0, sec * 1000 + 1000)
s_mean_ax.autoscale_view()
# update UI
plt.ion()
plt.pause(0.01)
| mit |
bhzunami/reanalytics | app/controller/report/report_generator.py | 1 | 26294 | from flask import current_app
from ...models import AnalyticView, Location
from ... import db
import datetime as dt
import pandas as pd
import os
# To sort the index in the excel file we define a global index
sort_index = [1, 2, 3, 4, 5, 6, 0]
# We use 0...6 for the header definition to use the
# same index for every type. This is the translation
# to write in the excel
headers = {
'room': {
0: 'not defined',
1: '1-1.5',
2: '2-2.5',
3: '3-3.5',
4: '4-4.5',
5: '5-5.5',
6: '6+'
},
'price': {
0: 'not defined',
1: '<1000',
2: '1000 - 1499',
3: '1500 - 1999',
4: '2000 - 2499',
5: '2500 - 3000',
6: '>=3000'},
'area': {
0: 'not defined',
1: '<50',
2: '50 - 99',
3: '100 - 149',
4: '150 - 199',
5: '>= 200',
        6: 'not used'}  # group_by_area never produces bucket 6
}
# Group functions
def group_by_rooms(value):
if value >= 6:
return 6
return value
def group_by_price(value):
if value == 0:
return 0
if value < 1000:
return 1
elif value >= 1000 and value < 1500:
return 2
elif value >= 1500 and value < 2000:
return 3
elif value >= 2000 and value < 2500:
return 4
elif value >= 2500 and value < 3000:
return 5
else:
return 6
def group_by_area(value):
if value == 0:
return 0
if value < 50:
return 1
elif value >= 50 and value < 100:
return 2
elif value >= 100 and value < 150:
return 3
elif value >= 150 and value < 200:
return 4
else:
return 5
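# Sketch of the bucketing (derived from the functions above): a value of 0 always
# maps to "not defined"; prices fall into 500-wide classes capped at >=3000,
# areas into 50-m2 classes capped at >=200, and room counts of 6 or more are
# collapsed into bucket 6. The bucket indices correspond to the label strings in
# the `headers` dict above.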
class ReportGenerator(object):
def __init__(self, plz, type, year, report_id):
"""Prepare a new excel file, formats and load the data from the database
"""
self.location = Location.query.filter_by(plz=plz).first()
# Create the filename
self.file_name = os.path.join(current_app.config['REPORT_DIR'],
'Report_{}.xls'.format(report_id))
# Create a writer
self.writer = pd.ExcelWriter(self.file_name,
engine='xlsxwriter',
options={'nan_inf_to_errors': True})
self.workbook = self.writer.book
# http://xlsxwriter.readthedocs.io/format.html
self.formats = {
'yellow_bar': self.workbook.add_format({'bg_color': '#FFFF00',
'font_color': '#000000',
'bold': True}),
'reanalytic_clolor': self.workbook.add_format({'bg_color': '#065f69',
'font_color': '#FFFFFF',
'bold': True,
'font_size': 20,
'valign': 'vcenter'}),
'bold': self.workbook.add_format({'bold': True}),
'merge_format': self.workbook.add_format({'align': 'center'}),
'title': self.workbook.add_format({'bold': True, 'font_size': 20}),
'h1': self.workbook.add_format({'bold': True, 'font_size': 18}),
'h2': self.workbook.add_format({'bold': True, 'font_size': 16}),
'h3': self.workbook.add_format({'font_size': 14}),
'percent': self.workbook.add_format({'num_format': '0.00%'}),
}
# Select the data from view
        # "Actual" data means only ads that are currently active, i.e. not yet finished
self.actual_data = pd.read_sql_query(
db.select([AnalyticView.rooms,
AnalyticView.price,
AnalyticView.area,
AnalyticView.plz,
AnalyticView.district_nr,
AnalyticView.cdate,
AnalyticView.edate])
.where(AnalyticView.canton_nr == self.location.canton_nr)
.where(AnalyticView.type == type)
# .where(AnalyticView.edate == dt.datetime.today()),
.where(AnalyticView.edate == "2016-07-10"),
db.session.bind,
parse_dates=['cdate', 'edate'])
# historical data means all ads from a period of time
self.historical_data = pd.read_sql_query(
db.select([AnalyticView.rooms,
AnalyticView.price,
AnalyticView.area,
AnalyticView.plz,
AnalyticView.district_nr,
AnalyticView.cdate,
AnalyticView.edate])
.where(AnalyticView.plz == self.location.plz)
.where(AnalyticView.type == type)
.where(AnalyticView.cyear >= year),
db.session.bind,
# index_col=['cdate'],
parse_dates=['cdate', 'edate'])
def make_title_sheet(self):
"""
Make a title sheet with some overview data
"""
worksheet = self.workbook.add_worksheet('Kapitalübersicht')
worksheet.set_column(0, 0, 20)
worksheet.set_row(5, 30)
worksheet.write('A6', 'REANALYTICS', self.formats['title'])
worksheet.merge_range('A9:M12', 'Report für {} {}'.format(self.location.plz,
self.location.locality),
self.formats['reanalytic_clolor'])
worksheet.write('A15', 'Datum', self.formats['bold'])
worksheet.write('B15', '{}'.format(dt.datetime.today().strftime("%d.%m.%Y")))
worksheet.write('A16', 'Lizensiert für', self.formats['bold'])
worksheet.write('B16', 'Linus Schenk', )
worksheet.write('A18', 'Powered by reanalytic.ch', self.formats['bold'])
def make_quantitive_analysis(self):
""" Mengenanalyse
"""
# Print scatterplot data
self.write_dataframe(df=self.actual_data[(self.actual_data.price != 0) &
(self.actual_data.area != 0) &
(self.actual_data.plz == self.location.plz)]
[['area', 'price']].transpose(),
ws=self.workbook.add_worksheet('scatter_plot_data'),
row=1,
title='Data',
overwriteHeaders=['area', 'price'])
# Y1
sheetname = 'Mengenanalyse'
worksheet = self.workbook.add_worksheet(sheetname)
worksheet.set_row(0, 30)
worksheet.write('A1', 'Mengenanalyse', self.formats['title'])
# Count
worksheet.write(1, 0, "Currently available apartments in {}".format(
self.location.plz))
worksheet.write(1, 1, len(self.actual_data[self.actual_data.plz == self.location.plz].index))
worksheet.write(2, 0, "Currently available apartments in {}".format(self.location.district))
worksheet.write(2, 1, len(self.actual_data[self.actual_data.district_nr == self.location.district_nr].index))
worksheet.write(3, 0, "Currently available apartments in {}".format(self.location.canton))
worksheet.write(3, 1, len(self.actual_data.index))
# Rooms
# - - - -
self.actual_data['grooms'] = self.actual_data.rooms.apply(group_by_rooms)
rooms = self.build_percent_data_frame('rooms', 'grooms')
index = 5
# Insert in excel
index = self.write_dataframe(df=rooms,
ws=worksheet,
row=index,
title='rooms',
type='room',
format=self.formats['percent'])
worksheet.write(index, 0, 'Nicht definiert')
worksheet.write(index, 1, self.actual_data.loc[self.actual_data.rooms == 0]
.count().rooms)
# Price
# - - - - -
self.actual_data['gprice'] = self.actual_data.loc[self.actual_data.price != 0].price.apply(group_by_price)
prices = self.build_percent_data_frame('price', 'gprice')
# Insert in excel
index = self.write_dataframe(df=prices,
ws=worksheet,
row=index+2,
title='Preis',
type='price',
format=self.formats['percent'])
worksheet.write(index, 0, 'Nicht definiert')
worksheet.write(index, 1, self.actual_data.loc[self.actual_data.price == 0]
.count().price)
# Area
# - - - -
self.actual_data['garea'] = self.actual_data.loc[self.actual_data.area != 0].area.apply(group_by_area)
areas = self.build_percent_data_frame('area', 'garea')
# Insert in excel
index = self.write_dataframe(df=areas,
ws=worksheet,
row=index+2,
title='Area',
type='area',
format=self.formats['percent'])
worksheet.write(index, 0, 'Nicht definiert')
worksheet.write(index, 1, self.actual_data.loc[self.actual_data.area == 0]
.count().price)
index += 2
# Y2
# =======================================================================
# Natural growth:
# =======================================================================
self.historical_data['grooms'] = self.historical_data.rooms.apply(group_by_rooms)
self.historical_data['gprice'] = self.historical_data.price.apply(group_by_price)
self.historical_data['garea'] = self.historical_data.area.apply(group_by_area)
worksheet = self.workbook.add_worksheet('Bestandesentwicklung')
worksheet.write(0, 0, 'Bestandesentwicklung', self.formats['h2'])
index = 1
# Create Timeperiod data
cdata = self.historical_data.set_index('cdate')
edata = self.historical_data.set_index('edate')
# Rooms
# - - - -
crooms = self.make_time_series(cdata, 'grooms')
erooms = self.make_time_series(edata, 'grooms')
self.write_dataframe(
df=crooms,
ws=worksheet,
row=index+1,
title='Rooms new',
type='room')
self.write_dataframe(
df=erooms,
ws=worksheet,
row=index+1,
col=10,
title='Room: closed',
type='room')
index = self.write_dataframe(
df=crooms - erooms,
ws=worksheet,
row=index+1,
col=20,
title='Room difference',
type='room')
# Price
# - - - -
cprice = self.make_time_series(cdata, 'gprice')
eprice = self.make_time_series(edata, 'gprice')
self.write_dataframe(
df=cprice,
ws=worksheet,
row=index+2,
title='Price new',
type='price')
self.write_dataframe(
df=eprice,
ws=worksheet,
row=index+2,
col=10,
title='Price closed',
type='price')
index = self.write_dataframe(
df=cprice - eprice,
ws=worksheet,
row=index+2,
col=20,
title='Price Difference',
type='price')
# Area
# - - - -
carea = self.make_time_series(cdata, 'garea')
earea = self.make_time_series(edata, 'garea')
self.write_dataframe(
df=carea,
ws=worksheet,
row=index+2,
title='Area new',
type='area')
self.write_dataframe(
df=earea,
ws=worksheet,
row=index+2,
col=10,
title='Area close',
type='area')
index = self.write_dataframe(
df=carea - earea,
ws=worksheet,
row=index+2,
col=20,
title='Area Difference',
type='area')
index += 2
# Y3
        # Average apartment size (m2)
self.actual_data['garea'] = self.actual_data.area.apply(group_by_area)
darea = self.actual_data.area.quantile([.25, .5, .75])
worksheet.write(index, 0, 'Durchschnittliche Wohnungsgrösse (m2)', self.formats['h2'])
worksheet.write(index+1, 0, '0.25')
worksheet.write(index+1, 1, darea[0.25])
worksheet.write(index+2, 0, '0.5')
worksheet.write(index+2, 1, darea[0.5])
worksheet.write(index+3, 0, '0.75')
worksheet.write(index+3, 1, darea[0.75])
index += 4
quantiles = [.25, .5, .75]
mean_area = self.build_quantile_data('area', 'grooms', quantiles)
index = self.write_dataframe(
df=mean_area[self.location.plz]
.reindex(sort_index, level=0)
.unstack(1),
ws=worksheet,
row=index+2,
title=self.location.plz,
type='rooms')
index = self.write_dataframe(
df=mean_area[self.location.district]
.reindex(sort_index, level=0)
.unstack(1),
ws=worksheet,
row=index+2,
title=self.location.district,
type='rooms')
index = self.write_dataframe(
df=mean_area[self.location.canton]
.reindex(sort_index, level=0)
.unstack(1),
ws=worksheet,
row=index+2,
title=self.location.canton,
type='rooms')
# Y4
def make_price_analysis(self):
""" Preisanalyse
"""
sheetname = 'Preisanalyse'
worksheet = self.workbook.add_worksheet(sheetname)
worksheet.set_row(0, 30)
worksheet.write('A1', 'Preisanalyse', self.formats['title'])
index = 2
dprice = self.actual_data.loc[self.actual_data.price != 0].price.quantile([.25, .5, .75])
worksheet.write(index, 0, 'Mietpreisniveau allgemein', self.formats['h2'])
worksheet.write(index+1, 0, '0.25')
worksheet.write(index+1, 1, dprice[0.25])
worksheet.write(index+2, 0, '0.5')
worksheet.write(index+2, 1, dprice[0.5])
worksheet.write(index+3, 0, '0.75')
worksheet.write(index+3, 1, dprice[0.75])
index += 4
quantiles = [.5]
index = self.write_dataframe(
df=self.build_quantile_data('price', 'grooms', quantiles)
.reindex(sort_index, level=0)
.unstack(1),
ws=worksheet,
row=index+2,
title='Room',
type='room')
index = self.write_dataframe(
df=self.build_quantile_data('price', 'garea', quantiles)
.reindex(sort_index, level=0)
.unstack(1),
ws=worksheet,
row=index+2,
title='Area',
type='area')
# Y5
# Price per square meter
self.actual_data['price_per_m'] = self.actual_data.price / self.actual_data.loc[self.actual_data.area != 0].area
index = self.write_dataframe(
df=self.build_quantile_data('price_per_m', 'grooms', quantiles)
.reindex(sort_index, level=0)
.unstack(1),
ws=worksheet,
row=index+2,
title='Price per m',
type='room')
        # Development of the rent price level
self.historical_data['price_per_m'] = self.historical_data.price / self.historical_data.loc[self.historical_data.area != 0].area
cdata = self.historical_data.set_index('cdate')
crooms = self.make_time_seriesq(cdata, 'grooms', 'price_per_m')
carea = self.make_time_seriesq(cdata, 'garea', 'price_per_m')
index = self.write_dataframe(
df=crooms,
ws=worksheet,
row=index+2,
title='Preisentwicklung',
type='room')
index = self.write_dataframe(
df=carea,
ws=worksheet,
row=index+2,
title='Preis',
type='area')
# Y7
def make_timePeriod(self):
"""
        Listing duration ("Insertionsdauer")
"""
import numpy as np
from datetime import date, timedelta
twomonthago = date.today() - timedelta(60)
self.actual_data['duration'] = self.actual_data.loc[self.actual_data.edate <= twomonthago].edate - self.actual_data.loc[self.actual_data.cdate <= twomonthago].cdate
worksheet = self.workbook.add_worksheet('Insertionsdauer')
worksheet.set_row(0, 30)
worksheet.write('A1', 'Insertionsdauer', self.formats['title'])
index = 2
dduration = self.actual_data.duration.quantile([.25, .5, .75])
worksheet.write(index, 0, 'Insertionsdauer', self.formats['h2'])
worksheet.write(index+1, 0, '0.25')
worksheet.write(index+1, 1, dduration[0.25] / (np.timedelta64(1, 'h') * 24) )
worksheet.write(index+2, 0, '0.5')
worksheet.write(index+2, 1, dduration[0.5]/(np.timedelta64(1, 'h') * 24))
worksheet.write(index+3, 0, '0.75')
worksheet.write(index+3, 1, dduration[0.75]/(np.timedelta64(1, 'h') * 24))
index = 7
index = self.write_dataframe(
df=self.build_tquantile_data('duration', 'grooms', 0.5).reindex(sort_index),
ws=worksheet,
row=index,
title='Room',
type='room')
index = self.write_dataframe(
df=self.build_tquantile_data('duration', 'gprice', 0.5).reindex(sort_index),
ws=worksheet,
row=index+2,
title='Price',
type='price')
index = self.write_dataframe(
df=self.build_tquantile_data('duration', 'garea', 0.5).reindex([1, 2, 3, 4, 5, 0]),
ws=worksheet,
row=index+2,
title='Area',
type='area')
# Y8
# Von 2015
index += 2
self.historical_data['duration'] = self.historical_data.loc[self.historical_data.edate <= twomonthago].edate - self.historical_data.loc[self.historical_data.cdate <= twomonthago].cdate
dduration = self.historical_data.duration.quantile([.25, .5, .75])
worksheet.write(index, 0, 'Insertionsdauer ab 2015', self.formats['h2'])
worksheet.write(index+1, 0, '0.25')
worksheet.write(index+1, 1, dduration[0.25] / (np.timedelta64(1, 'h') * 24))
worksheet.write(index+2, 0, '0.5')
worksheet.write(index+2, 1, dduration[0.5]/(np.timedelta64(1, 'h') * 24))
worksheet.write(index+3, 0, '0.75')
worksheet.write(index+3, 1, dduration[0.75]/(np.timedelta64(1, 'h') * 24))
index += 3
cdata = self.historical_data.set_index('cdate')
crooms = self.make_time_seriesq(cdata, 'grooms', 'duration')
cprice = self.make_time_seriesq(cdata, 'gprice', 'duration')
carea = self.make_time_seriesq(cdata, 'garea', 'duration')
index = self.write_dataframe(
df=crooms,
ws=worksheet,
row=index+2,
title='Entwicklung insertions dauer',
type='room')
index = self.write_dataframe(
df=cprice,
ws=worksheet,
row=index+2,
type='price')
index = self.write_dataframe(
df=carea,
ws=worksheet,
row=index+2,
type='area')
def finish(self):
# Close the workbook
# Write all datas to workbook just in case:
worksheet = self.workbook.add_worksheet('actual_data')
self.write_dataframe(df=self.actual_data.transpose(),
ws=worksheet,
overwriteHeaders=self.actual_data.keys().values.tolist())
worksheet = self.workbook.add_worksheet('historical_data')
self.write_dataframe(df=self.historical_data.transpose(),
ws=worksheet,
overwriteHeaders=self.historical_data.keys().values.tolist())
self.workbook.close()
def write_dataframe(self, df, ws, row=0, col=0, title=None, type=None, format=None, overwriteHeaders=None):
"""
Helper method to write a dataframe to an excel worksheet
"""
# Write title if we have a title
if title:
ws.write(row, col, '{}'.format(title), self.formats['h3'])
row += 1
# Write headers
# Normally we would have a type and with this type we print the correct header.
# But it is also possible to give no type and overwrite the headers
if type:
for i in range(0, len(df.index)):
ws.write(row, col+i+1, headers[type][df.index[i]])
row += 1
# If really nessessery you can overwrite the headers
elif overwriteHeaders:
for i in range(0, len(overwriteHeaders)):
ws.write(row, col+i+1, overwriteHeaders[i])
row += 1
# Write down the data
for i in range(df.shape[1]):
if isinstance(df.keys()[i], tuple):
ws.write(row+i, col, '{}'.format(df.keys()[i][0]))
else:
ws.write(row+i, col, '{}'.format(df.keys()[i]))
df[df.keys()[i]] = df[df.keys()[i]].fillna(0)
for j in range(df.shape[0]):
ws.write(row+i, col+j+1, df[df.keys()[i]][df.index[j]], format)
return row + df.shape[1]
def build_percent_data_frame(self, name, group):
""" Build dataframe on plz/location/canton
and calculate the percentage for this
"""
return pd.DataFrame({
self.location.canton:
self.actual_data.loc[self.actual_data[name] != 0]
.groupby(group)[name]
.count() / len(self.actual_data.loc[self.actual_data[name] != 0]),
self.location.district:
self.actual_data.loc[(self.actual_data[name] != 0) &
(self.actual_data.district_nr == self.location.district_nr)]
.groupby(group)[name]
.count() / len(self.actual_data.loc[(self.actual_data[name] != 0) &
(self.actual_data.district_nr == self.location.district_nr)]),
self.location.plz:
self.actual_data.loc[(self.actual_data[name] != 0) &
                                     (self.actual_data.plz == self.location.plz)]
.groupby(group)[name]
.count() / len(self.actual_data.loc[(self.actual_data[name] != 0) &
(self.actual_data.plz == self.location.plz)])
})
def build_quantile_data(self, name, group, quantiles=[]):
"""Create Dataframe for plz/location/canton as key and
calculate the quantile for the name
If you are not dealing with timeseries use this function!
:param name: the attribute name you want the quantiles
:param group: which attribute should be grouped
:param quantiles: a list of floats for qunatile
"""
return pd.DataFrame({
self.location.canton:
self.actual_data.loc[self.actual_data[name] != 0]
.groupby(group)[name]
.quantile(quantiles),
self.location.district:
self.actual_data.loc[(self.actual_data[name] != 0) &
(self.actual_data.district_nr == self.location.district_nr)]
.groupby(group)[name]
.quantile(quantiles),
self.location.plz:
self.actual_data.loc[(self.actual_data[name] != 0) &
(self.actual_data.plz == self.location.plz)]
.groupby(group)[name]
.quantile(quantiles)
})
def build_tquantile_data(self, name, group, quantiles=[]):
"""Create Dataframe for plz/location/canton as key and
calculate the quantile for the name
This function deals with timeseries and for that we can not use
the [data != 0] cause timeseries can not be used for that
:param name: the attribute name you want the quantiles
:param group: which attribute should be grouped
:param quantiles: a list of floats for qunatile
"""
return pd.DataFrame({
# Canton
self.location.canton:
self.actual_data
.groupby(group)[name]
.quantile(quantiles),
# District
self.location.district:
self.actual_data.loc[self.actual_data.district_nr == self.location.district_nr]
.groupby(group)[name]
.quantile(quantiles),
# Plz
self.location.plz:
self.actual_data.loc[self.actual_data.plz == self.location.plz]
.groupby(group)[name]
.quantile(quantiles)
})
def make_time_series(self, df, group):
""" Build timeseries if a timeseris is a index you can
group with pd.TimeGrouper("M")
"""
return df.groupby([pd.TimeGrouper("M"),
group])[group].count().reindex(sort_index,
level=1).unstack(0)
def make_time_seriesq(self, df, group, attribute):
return df.groupby([pd.TimeGrouper("M"),
group])[attribute].quantile(.5).reindex(sort_index,
level=1).unstack(0)
| apache-2.0 |
gongzhitaao/tensorflow-adversarial | example/fgmt_mnist2.py | 1 | 8717 | """
Use fast gradient sign method to craft adversarial on MNIST.
Dependencies: python3, tensorflow v1.4, numpy, matplotlib
"""
import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # noqa: E402
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tensorflow as tf
from attacks import fgmt
img_size = 28
img_chan = 1
n_classes = 10
print('\nLoading MNIST')
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = np.reshape(X_train, [-1, img_size, img_size, img_chan])
X_train = X_train.astype(np.float32) / 255
X_test = np.reshape(X_test, [-1, img_size, img_size, img_chan])
X_test = X_test.astype(np.float32) / 255
to_categorical = tf.keras.utils.to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('\nSpliting data')
ind = np.random.permutation(X_train.shape[0])
X_train, y_train = X_train[ind], y_train[ind]
VALIDATION_SPLIT = 0.1
n = int(X_train.shape[0] * (1-VALIDATION_SPLIT))
X_valid = X_train[n:]
X_train = X_train[:n]
y_valid = y_train[n:]
y_train = y_train[:n]
print('\nConstruction graph')
def model(x, logits=False, training=False):
with tf.variable_scope('conv0'):
z = tf.layers.conv2d(x, filters=32, kernel_size=[3, 3],
padding='same', activation=tf.nn.relu)
z = tf.layers.max_pooling2d(z, pool_size=[2, 2], strides=2)
with tf.variable_scope('conv1'):
z = tf.layers.conv2d(z, filters=64, kernel_size=[3, 3],
padding='same', activation=tf.nn.relu)
z = tf.layers.max_pooling2d(z, pool_size=[2, 2], strides=2)
with tf.variable_scope('flatten'):
shape = z.get_shape().as_list()
z = tf.reshape(z, [-1, np.prod(shape[1:])])
with tf.variable_scope('mlp'):
z = tf.layers.dense(z, units=128, activation=tf.nn.relu)
z = tf.layers.dropout(z, rate=0.25, training=training)
logits_ = tf.layers.dense(z, units=10, name='logits')
y = tf.nn.softmax(logits_, name='ybar')
if logits:
return y, logits_
return y
class Dummy:
pass
env = Dummy()
with tf.variable_scope('model'):
env.x = tf.placeholder(tf.float32, (None, img_size, img_size, img_chan),
name='x')
env.y = tf.placeholder(tf.float32, (None, n_classes), name='y')
env.training = tf.placeholder_with_default(False, (), name='mode')
env.ybar, logits = model(env.x, logits=True, training=env.training)
with tf.variable_scope('acc'):
count = tf.equal(tf.argmax(env.y, axis=1), tf.argmax(env.ybar, axis=1))
env.acc = tf.reduce_mean(tf.cast(count, tf.float32), name='acc')
with tf.variable_scope('loss'):
xent = tf.nn.softmax_cross_entropy_with_logits(labels=env.y,
logits=logits)
env.loss = tf.reduce_mean(xent, name='loss')
with tf.variable_scope('train_op'):
optimizer = tf.train.AdamOptimizer()
env.train_op = optimizer.minimize(env.loss)
env.saver = tf.train.Saver()
with tf.variable_scope('model', reuse=True):
env.adv_eps = tf.placeholder(tf.float32, (), name='adv_eps')
env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
env.adv_y = tf.placeholder(tf.int32, (), name='adv_y')
env.x_fgmt = fgmt(model, env.x, y=env.adv_y, epochs=env.adv_epochs,
eps=env.adv_eps)
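    # env.x_fgmt is the adversarial-example tensor produced by the fgmt attack:
    # adv_y selects the target class, adv_eps the per-step size and adv_epochs
    # the number of iterations; it is evaluated batch-wise in make_fgmt() below.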
print('\nInitializing graph')
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
def evaluate(sess, env, X_data, y_data, batch_size=128):
"""
Evaluate TF model by running env.loss and env.acc.
"""
print('\nEvaluating')
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
loss, acc = 0, 0
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
cnt = end - start
batch_loss, batch_acc = sess.run(
[env.loss, env.acc],
feed_dict={env.x: X_data[start:end],
env.y: y_data[start:end]})
loss += batch_loss * cnt
acc += batch_acc * cnt
loss /= n_sample
acc /= n_sample
print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))
return loss, acc
def train(sess, env, X_data, y_data, X_valid=None, y_valid=None, epochs=1,
load=False, shuffle=True, batch_size=128, name='model'):
"""
Train a TF model by running env.train_op.
"""
if load:
if not hasattr(env, 'saver'):
return print('\nError: cannot find saver op')
print('\nLoading saved model')
return env.saver.restore(sess, 'model/{}'.format(name))
print('\nTrain model')
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
for epoch in range(epochs):
print('\nEpoch {0}/{1}'.format(epoch + 1, epochs))
if shuffle:
print('\nShuffling data')
ind = np.arange(n_sample)
np.random.shuffle(ind)
X_data = X_data[ind]
y_data = y_data[ind]
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
sess.run(env.train_op, feed_dict={env.x: X_data[start:end],
env.y: y_data[start:end],
env.training: True})
if X_valid is not None:
evaluate(sess, env, X_valid, y_valid)
if hasattr(env, 'saver'):
print('\n Saving model')
os.makedirs('model', exist_ok=True)
env.saver.save(sess, 'model/{}'.format(name))
def predict(sess, env, X_data, batch_size=128):
"""
Do inference by running env.ybar.
"""
print('\nPredicting')
n_classes = env.ybar.get_shape().as_list()[1]
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
yval = np.empty((n_sample, n_classes))
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
y_batch = sess.run(env.ybar, feed_dict={env.x: X_data[start:end]})
yval[start:end] = y_batch
print()
return yval
def make_fgmt(sess, env, X_data, epochs=1, eps=0.01, batch_size=128):
"""
    Generate adversarial examples by running env.x_fgmt.
"""
print('\nMaking adversarials via FGSM')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_adv = np.empty_like(X_data)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
adv = sess.run(env.x_fgmt, feed_dict={
env.x: X_data[start:end],
env.adv_y: np.random.choice(n_classes),
env.adv_eps: eps,
env.adv_epochs: epochs})
X_adv[start:end] = adv
print()
return X_adv
print('\nTraining')
train(sess, env, X_train, y_train, X_valid, y_valid, load=True, epochs=5,
name='mnist')
print('\nEvaluating on clean data')
evaluate(sess, env, X_test, y_test)
print('\nGenerating adversarial data')
X_adv = make_fgmt(sess, env, X_test, eps=0.02, epochs=8)
print('\nEvaluating on adversarial data')
evaluate(sess, env, X_adv, y_test)
print('\nRandomly sample adversarial data from each category')
y1 = predict(sess, env, X_test)
y2 = predict(sess, env, X_adv)
z0 = np.argmax(y_test, axis=1)
z1 = np.argmax(y1, axis=1)
z2 = np.argmax(y2, axis=1)
X_tmp = np.empty((10, 28, 28))
y_tmp = np.empty((10, 10))
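# For each digit class keep one sample that the model classifies correctly on
# the clean image (z1 == z0 == i) but misclassifies after the attack (z2 != i).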
for i in range(10):
print('Target {0}'.format(i))
ind, = np.where(np.all([z0 == i, z1 == i, z2 != i], axis=0))
cur = np.random.choice(ind)
X_tmp[i] = np.squeeze(X_adv[cur])
y_tmp[i] = y2[cur]
print('\nPlotting results')
fig = plt.figure(figsize=(10, 1.2))
gs = gridspec.GridSpec(1, 10, wspace=0.05, hspace=0.05)
label = np.argmax(y_tmp, axis=1)
proba = np.max(y_tmp, axis=1)
for i in range(10):
ax = fig.add_subplot(gs[0, i])
ax.imshow(X_tmp[i], cmap='gray', interpolation='none')
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel('{0} ({1:.2f})'.format(label[i], proba[i]),
fontsize=12)
print('\nSaving figure')
gs.tight_layout(fig)
os.makedirs('img', exist_ok=True)
plt.savefig('img/fgmt_mnist2.png')
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/__init__.py | 6 | 5461 | # pylint: disable-msg=W0614,W0401,W0611,W0622
# flake8: noqa
__docformat__ = 'restructuredtext'
# Let users know if they're missing any of our hard dependencies
hard_dependencies = ("numpy", "pytz", "dateutil")
missing_dependencies = []
for dependency in hard_dependencies:
try:
__import__(dependency)
except ImportError as e:
missing_dependencies.append(dependency)
if missing_dependencies:
raise ImportError(
"Missing required dependencies {0}".format(missing_dependencies))
del hard_dependencies, dependency, missing_dependencies
# numpy compat
from pandas.compat.numpy import *
try:
from pandas._libs import (hashtable as _hashtable,
lib as _lib,
tslib as _tslib)
except ImportError as e: # pragma: no cover
# hack but overkill to use re
module = str(e).lstrip('cannot import name ')
raise ImportError("C extension: {0} not built. If you want to import "
"pandas from the source directory, you may need to run "
"'python setup.py build_ext --inplace --force' to build "
"the C extensions first.".format(module))
from datetime import datetime
# let init-time option registration happen
import pandas.core.config_init
from pandas.core.api import *
from pandas.core.sparse.api import *
from pandas.stats.api import *
from pandas.tseries.api import *
from pandas.core.computation.api import *
from pandas.core.reshape.api import *
# deprecate tools.plotting, plot_params and scatter_matrix on the top namespace
import pandas.tools.plotting
plot_params = pandas.plotting._style._Options(deprecated=True)
# do not import deprecate to top namespace
scatter_matrix = pandas.util._decorators.deprecate(
'pandas.scatter_matrix', pandas.plotting.scatter_matrix,
'pandas.plotting.scatter_matrix')
from pandas.util._print_versions import show_versions
from pandas.io.api import *
from pandas.util._tester import test
import pandas.testing
# extension module deprecations
from pandas.util._depr_module import _DeprecatedModule
json = _DeprecatedModule(deprmod='pandas.json',
moved={'dumps': 'pandas.io.json.dumps',
'loads': 'pandas.io.json.loads'})
parser = _DeprecatedModule(deprmod='pandas.parser',
removals=['na_values'],
moved={'CParserError': 'pandas.errors.ParserError'})
lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto=False,
moved={'Timestamp': 'pandas.Timestamp',
'Timedelta': 'pandas.Timedelta',
'NaT': 'pandas.NaT',
'infer_dtype': 'pandas.api.types.infer_dtype'})
tslib = _DeprecatedModule(deprmod='pandas.tslib',
moved={'Timestamp': 'pandas.Timestamp',
'Timedelta': 'pandas.Timedelta',
'NaT': 'pandas.NaT',
'NaTType': 'type(pandas.NaT)',
'OutOfBoundsDatetime': 'pandas.errors.OutOfBoundsDatetime'})
# use the closest tagged version if possible
from ._version import get_versions
v = get_versions()
__version__ = v.get('closest-tag', v['version'])
del get_versions, v
# module level doc-string
__doc__ = """
pandas - a powerful data analysis and manipulation library for Python
=====================================================================
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with "relational" or "labeled" data both
easy and intuitive. It aims to be the fundamental high-level building block for
doing practical, **real world** data analysis in Python. Additionally, it has
the broader goal of becoming **the most powerful and flexible open source data
analysis / manipulation tool available in any language**. It is already well on
its way toward this goal.
Main Features
-------------
Here are just a few of the things that pandas does well:
- Easy handling of missing data in floating point as well as non-floating
point data
- Size mutability: columns can be inserted and deleted from DataFrame and
higher dimensional objects
- Automatic and explicit data alignment: objects can be explicitly aligned
to a set of labels, or the user can simply ignore the labels and let
`Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible group by functionality to perform split-apply-combine
operations on data sets, for both aggregating and transforming data
- Make it easy to convert ragged, differently-indexed data in other Python
and NumPy data structures into DataFrame objects
- Intelligent label-based slicing, fancy indexing, and subsetting of large
data sets
- Intuitive merging and joining data sets
- Flexible reshaping and pivoting of data sets
- Hierarchical labeling of axes (possible to have multiple labels per tick)
- Robust IO tools for loading data from flat files (CSV and delimited),
Excel files, databases, and saving/loading data from the ultrafast HDF5
format
- Time series-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
"""
| mit |
brianjpetersen/homely | homely/__init__.py | 1 | 6066 | # standard libraries
import os
import sys
import copy
import itertools
# third party libraries
from qtpy import QtGui, QtCore, QtWidgets
try:
from serial.tools.list_ports import comports as list_serial_ports
except:
pass
try:
import _winreg as winreg
except ImportError:
pass
try:
import win32api
except ImportError:
pass
# first party libraries
from . import qmatplotlib as matplotlib
__where__ = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(__where__, '..', 'VERSION'), 'rb') as f:
__version__ = f.read()
class QConsoleLog(QtWidgets.QPlainTextEdit):
def __init__(self, stdout=True, stderr=True, *args, **kwargs):
super(QConsoleLog, self).__init__(*args, **kwargs)
# only allow reads
self.setReadOnly(True)
# change default colors
        palette = QtGui.QPalette()
palette.setColor(QtGui.QPalette.Base, QtGui.QColor(0, 0, 0))
palette.setColor(QtGui.QPalette.Text, QtGui.QColor(255, 255, 255))
self.setPalette(palette)
# capture outputs, as appropriate
if stdout:
sys.stdout = self
if stderr:
sys.stderr = self
def flush(self):
pass
def write(self, s):
        self.moveCursor(QtGui.QTextCursor.End)
self.insertPlainText(s)
def _get_text(self):
return self.toPlainText()
text = property(_get_text)
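# Minimal usage sketch for QConsoleLog (illustrative only; assumes a running
# QApplication and that capturing stdout/stderr is desired):
#
#     app = QtWidgets.QApplication([])
#     log = QConsoleLog()
#     log.show()
#     print('hello')        # appears in the widget, since stdout is captured
#     app.exec_()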
class QFormLayout(QtWidgets.QFormLayout):
def __init__(self, *args, **kwargs):
super(QFormLayout, self).__init__(*args, **kwargs)
self.fieldLabel = {}
def disableField(self, field):
field.setDisabled(True)
label = self.fieldLabel[field]
label.setDisabled(True)
def enableField(self, field):
field.setEnabled(True)
label = self.fieldLabel[field]
label.setEnabled(True)
def addRow(self, label, widget, stretch=True):
layout = QtWidgets.QHBoxLayout()
if issubclass(type(widget), QtWidgets.QLayout):
layout.addLayout(widget)
elif issubclass(type(widget), QtWidgets.QWidget):
layout.addWidget(widget)
else:
raise ValueError()
if stretch:
layout.addStretch()
super(QFormLayout, self).addRow(label, layout)
self.fieldLabel[widget] = self.labelForField(layout)
class QViewComboBox(QtWidgets.QWidget):
item_selection_changed = QtCore.pyqtSignal()
def __init__(self, view, add_selected=True, sleep=None, editable=False, *args, **kwargs):
super(QViewComboBox, self).__init__(*args, **kwargs)
self.ComboBox = QtWidgets.QComboBox()
self.ComboBox.setEditable(editable)
main_layout = QtWidgets.QHBoxLayout()
main_layout.setContentsMargins(0, 0, 0, 0)
self.ComboBox.currentIndexChanged.connect(lambda: self.item_selection_changed.emit())
self._view = view
self.add_selected = add_selected
self._sleep = sleep
if self._sleep is not None:
assert type(self._sleep) == int
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_items)
self.timer.start(1000*int(self._sleep))
self.update_items()
self.refreshButton = QtWidgets.QPushButton()
fname = os.path.join(__where__, 'reload.png')
self.refreshButton.setIcon(QtGui.QIcon(fname))
self.refreshButton.setFixedSize(24, 24)
self.refreshButton.clicked.connect(self.update_items)
self.setLayout(main_layout)
main_layout.addWidget(self.ComboBox)
main_layout.addWidget(self.refreshButton)
def update_items(self):
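        # Block signals while repopulating so clearing/adding items does not
        # emit spurious currentIndexChanged events; they are re-enabled below.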
self.ComboBox.blockSignals(True)
current_item_selected = copy.deepcopy(self.selected_item)
new_items = self._view()
self.ComboBox.clear()
self.ComboBox.addItems(list(new_items))
current_item_reselected = False
for indx in range(self.ComboBox.count()):
if self.ComboBox.itemText(indx) == current_item_selected:
self.ComboBox.setCurrentIndex(indx)
current_item_reselected = True
break
self.ComboBox.blockSignals(False)
if not current_item_reselected:
indx = 0
self.ComboBox.currentIndexChanged.emit(indx)
def currentIndex(self):
indx = self.ComboBox.currentIndex()
return int(indx)
def itemText(self, indx):
item = self.ComboBox.itemText(indx)
return str(item)
def _get_selected_item(self):
indx = self.currentIndex()
item = self.itemText(indx)
return str(item)
selected_item = property(_get_selected_item)
class QSerialPortSelector(QViewComboBox):
def __init__(self, *args, **kwargs):
super(QSerialPortSelector, self).__init__(self.available_serial_ports, *args, **kwargs)
def available_serial_ports(self):
ports = []
if sys.platform.startswith('win'):
path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)
except:
return []
for i in itertools.count():
try:
port = str(winreg.EnumValue(key, i)[1])
ports.append(port)
except EnvironmentError:
break
else:
for port, _, _ in list_serial_ports():
ports.append(port)
return ports
class QPeripheralDriveSelector(QViewComboBox):
def __init__(self, *args, **kwargs):
super(QPeripheralDriveSelector, self).__init__(self.available_peripheral_drives, *args, **kwargs)
def available_peripheral_drives(self):
""" NB: this is only set up for Windows machines currently.
"""
try:
drives = win32api.GetLogicalDriveStrings()
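            # GetLogicalDriveStrings returns one NUL-separated string, e.g.
            # 'A:\\\x00C:\\\x00D:\\\x00'; split it and drop the system drive C:.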
drives = [drive for drive in drives.split('\000') if drive and drive != 'C:\\']
return drives
except:
            return []
| mit |
hansiu/pathways-analysis | docs/conf.py | 3 | 5764 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Pathway Analysis documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 12 11:44:06 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_autodoc_annotation',
'sphinx_autodoc_typehints'
]
add_module_names = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Pathway Analysis'
copyright = '2017, KNBiBS'
author = 'KNBiBS'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_button': True,
'github_user': 'kn-bibs',
'github_repo': 'pathways-analysis',
'github_type': 'star',
'travis_button': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PathwayAnalysisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PathwayAnalysis.tex', 'Pathway Analysis Documentation',
'KNBiBS', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pathwayanalysis', 'Pathway Analysis Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PathwayAnalysis', 'Pathway Analysis Documentation',
author, 'PathwayAnalysis', 'One line description of project.',
'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3.6', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None)
}
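# With this mapping, cross-references such as :class:`pandas.DataFrame` in
# docstrings are resolved against the external documentation linked above.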
| mit |
0x0all/Passage | examples/newsgroup.py | 2 | 1787 | from sklearn.datasets import fetch_20newsgroups
categories = ['alt.atheism', 'sci.space']
newsgroups_train = fetch_20newsgroups(subset='train',
remove=('headers', 'footers', 'quotes'),
categories=categories)
newsgroups_test = fetch_20newsgroups(subset='test',
remove=('headers', 'footers', 'quotes'),
categories=categories)
print len(newsgroups_train.data), len(newsgroups_test.data)
from sklearn import metrics
from passage.preprocessing import Tokenizer
from passage.layers import Embedding, GatedRecurrent, Dense
from passage.models import RNN
from passage.utils import save
tokenizer = Tokenizer(min_df=10, max_features=50000)
X_train = tokenizer.fit_transform(newsgroups_train.data)
X_test = tokenizer.transform(newsgroups_test.data)
Y_train = newsgroups_train.target
Y_test = newsgroups_test.target
print tokenizer.n_features
layers = [
Embedding(size=128, n_features=tokenizer.n_features),
GatedRecurrent(size=256, activation='tanh', gate_activation='steeper_sigmoid',
init='orthogonal', seq_output=False),
Dense(size=1, activation='sigmoid', init='orthogonal') # sigmoid for binary classification
]
model = RNN(layers=layers, cost='bce') # bce is classification loss for binary classification and sigmoid output
for i in range(2):
model.fit(X_train, Y_train, n_epochs=1)
tr_preds = model.predict(X_train[:len(Y_test)])
te_preds = model.predict(X_test)
tr_acc = metrics.accuracy_score(Y_train[:len(Y_test)], tr_preds > 0.5)
te_acc = metrics.accuracy_score(Y_test, te_preds > 0.5)
print i, tr_acc, te_acc # dataset too small to fully utilize Passage
save(model, 'model.pkl')
| mit |
cgre-aachen/gempy | examples/tutorials/ch1_fundamentals/ch1_6_2d_visualization.py | 1 | 6065 | """
1.6: 2D Visualization.
======================
"""
# %%
# Importing GemPy
import gempy as gp
# Importing auxiliary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(1515)
pd.set_option('precision', 2)
# %%
# Model interpolation
# ~~~~~~~~~~~~~~~~~~~
#
# %%
# Data Preparation
data_path = 'https://raw.githubusercontent.com/cgre-aachen/gempy_data/master/'
geo_data = gp.create_data('viz_2d', [0, 1000, 0, 1000, 0, 1000], resolution=[10, 10, 10],
path_o=data_path + "/data/input_data/jan_models/model5_orientations.csv",
path_i=data_path + "/data/input_data/jan_models/model5_surface_points.csv")
# %%
gp.plot_2d(geo_data)
# %%
geo_data.set_topography(d_z=(500, 1000))
# %%
section_dict = {'section1': ([0, 0], [1000, 1000], [100, 80]),
'section2': ([800, 0], [800, 1000], [150, 100]),
'section3': ([50, 200], [100, 500], [200, 150])}
# %%
geo_data.set_section_grid(section_dict)
gp.plot.plot_section_traces(geo_data)
# %%
geo_data.grid.sections
# %%
gp.set_interpolator(geo_data, theano_optimizer='fast_compile')
# %%
gp.map_stack_to_surfaces(geo_data, {"Fault_Series": 'fault',
"Strat_Series": ('rock2', 'rock1')})
geo_data.set_is_fault(['Fault_Series'])
# %%
geo_data.get_active_grids()
# %%
gp.compute_model(geo_data)
# %%
# new plotting api
gp.plot_2d(geo_data, section_names=['section1'])
# %%
# or
# %%
gp.plot.plot_2d(geo_data, section_names=['section1'])
# %%
# Plot 2d: Object oriented:
# ------------------------
#
# %%
# One plot
# ^^^^^^^^
#
# %%
p = gp.plot_2d(geo_data, section_names=[], direction=None, show=False)
p.fig.show()
# %%
p = gp.plot_2d(geo_data, section_names=[], direction=None, show=False)
# -----new code------
sec_name = 'section1'
s1 = p.add_section(sec_name)
p.plot_data(s1, sec_name, projection_distance=200)
p.fig.show()
# %%
p = gp.plot_2d(geo_data, section_names=[], direction=None, show=False)
sec_name = 'section1'
s1 = p.add_section(sec_name)
# -----new code------
p.plot_data(s1, sec_name, projection_distance=200)
p.plot_contacts(s1, sec_name)
p.fig.show()
# %%
p = gp.plot_2d(geo_data, section_names=[], direction=None, show=False)
sec_name = 'section1'
s1 = p.add_section(sec_name)
p.plot_data(s1, sec_name, projection_distance=200)
p.plot_contacts(s1, sec_name)
# -----new code------
p.plot_lith(s1, sec_name)
p.plot_topography(s1, sec_name)
p.fig.show()
# %%
# Several plots
# ^^^^^^^^^^^^^
#
# %%
sec_name = 'section1'
sec_name_2 = 'section3'
p2 = gp.plot_2d(geo_data, n_axis=3, figsize=(15, 15), # General fig options
section_names=[sec_name, 'topography'], cell_number=[3], # Defining the sections
show_data=False, show_lith=False, show_scalar=False, show_boundaries=False)
# %%
# Create the section. This locates the axes and gives the right
# aspect ratio and labels
p2 = gp.plot_2d(geo_data, n_axis=3, figsize=(15, 15), # General fig options
section_names=[sec_name, 'topography'], cell_number=[3], # Defining the sections
show_data=False, show_lith=False, show_scalar=False, show_boundaries=False,
show=False)
# -----new code------
s1 = p2.add_section(sec_name_2, ax_pos=224)
p2.fig.show()
# %%
# Axes 0
p2 = gp.plot_2d(geo_data, n_axis=3, figsize=(15, 15), # General fig options
section_names=[sec_name, 'topography'], cell_number=[3], # Defining the sections
show_data=False, show_lith=False, show_scalar=False, show_boundaries=False,
show=False)
s1 = p2.add_section(sec_name_2, ax_pos=224)
# -----new code------
p2.plot_contacts(s1, sec_name_2)
p2.plot_lith(s1, sec_name_2)
p2.plot_data(s1, sec_name_2, projection_distance=200)
p2.plot_topography(s1, sec_name_2)
p2.fig.show()
# %%
# Axes 1
# sphinx_gallery_thumbnail_number = 12
p2 = gp.plot_2d(geo_data, n_axis=3, figsize=(15, 15), # General fig options
section_names=[sec_name, 'topography'], cell_number=[3], # Defining the sections
show_data=False, show_lith=False, show_scalar=False, show_boundaries=False,
show=False)
s1 = p2.add_section(sec_name_2, ax_pos=224)
p2.plot_contacts(s1, sec_name_2)
p2.plot_lith(s1, sec_name_2)
p2.plot_data(s1, sec_name_2, projection_distance=200)
p2.plot_topography(s1, sec_name_2)
# -----new code------
p2.plot_contacts(p2.axes[0], cell_number=3)
p2.plot_scalar_field(p2.axes[0], cell_number=3, series_n=1)
p2.fig.show()
# %%
# Axes 2
p2 = gp.plot_2d(geo_data, n_axis=3, figsize=(15, 15), # General fig options
section_names=[sec_name, 'topography'], cell_number=[3], # Defining the sections
show_data=False, show_lith=False, show_scalar=False, show_boundaries=False,
show=False)
s1 = p2.add_section(sec_name_2, ax_pos=224)
p2.plot_contacts(s1, sec_name_2)
p2.plot_lith(s1, sec_name_2)
p2.plot_data(s1, sec_name_2, projection_distance=200)
p2.plot_topography(s1, sec_name_2)
p2.plot_contacts(p2.axes[0], cell_number=3)
p2.plot_scalar_field(p2.axes[0], cell_number=3, series_n=1)
# -----new code------
p2.plot_lith(p2.axes[1], 'topography')
p2.plot_contacts(p2.axes[1], 'topography')
p2.fig.show()
# %%
# Plotting traces:
# ''''''''''''''''
#
# %%
p2.plot_section_traces(p2.axes[1])
p2.fig.show()
# %%
gp.plot.plot_section_traces(geo_data)
# %%
# Plot API
# --------
#
# %%
# If nothing is passed, a Plot2D object is created and therefore you are
# in the same situation as above:
#
# %%
p3 = gp.plot_2d(geo_data)
# %%
# Alternatively you can pass section\_names, cell\_numbers + direction or
# any combination of the above:
#
# %%
gp.plot_2d(geo_data, section_names=['topography'])
# %%
gp.plot_2d(geo_data, section_names=['section1'])
# %%
gp.plot_2d(geo_data, section_names=['section1', 'section2'])
# %%
gp.plot_2d(geo_data, figsize=(15, 15), section_names=['section1', 'section2', 'topography'],
cell_number='mid')
| lgpl-3.0 |
NunoEdgarGub1/scikit-learn | sklearn/tests/test_cross_validation.py | 70 | 41943 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
jreback/pandas | pandas/core/arrays/boolean.py | 1 | 22535 | import numbers
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike, Dtype
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype, register_extension_dtype
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from .masked import BaseMaskedArray, BaseMaskedDtype
if TYPE_CHECKING:
import pyarrow
@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
"""
Extension dtype for boolean data.
.. versionadded:: 1.0.0
.. warning::
BooleanDtype is considered experimental. The implementation and
parts of the API may change without warning.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> pd.BooleanDtype()
BooleanDtype
"""
name = "boolean"
# mypy: https://github.com/python/mypy/issues/4125
@property
def type(self) -> Type: # type: ignore[override]
return np.bool_
@property
def kind(self) -> str:
return "b"
@property
def numpy_dtype(self) -> np.dtype:
return np.dtype("bool")
@classmethod
def construct_array_type(cls) -> Type["BooleanArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return BooleanArray
def __repr__(self) -> str:
return "BooleanDtype"
@property
def _is_boolean(self) -> bool:
return True
@property
def _is_numeric(self) -> bool:
return True
def __from_arrow__(
self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
) -> "BooleanArray":
"""
Construct BooleanArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
# TODO should optimize this without going through object array
bool_arr = BooleanArray._from_sequence(np.array(arr))
results.append(bool_arr)
return BooleanArray._concat_same_type(results)
def coerce_to_array(
values, mask=None, copy: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
if isinstance(values, BooleanArray):
if mask is not None:
raise ValueError("cannot pass mask for BooleanArray input")
values, mask = values._data, values._mask
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
mask_values = None
if isinstance(values, np.ndarray) and values.dtype == np.bool_:
if copy:
values = values.copy()
elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
mask_values = isna(values)
values_bool = np.zeros(len(values), dtype=bool)
values_bool[~mask_values] = values[~mask_values].astype(bool)
if not np.all(
values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
):
raise TypeError("Need to pass bool-like values")
values = values_bool
else:
values_object = np.asarray(values, dtype=object)
inferred_dtype = lib.infer_dtype(values_object, skipna=True)
integer_like = ("floating", "integer", "mixed-integer-float")
if inferred_dtype not in ("boolean", "empty") + integer_like:
raise TypeError("Need to pass bool-like values")
mask_values = isna(values_object)
values = np.zeros(len(values), dtype=bool)
values[~mask_values] = values_object[~mask_values].astype(bool)
# if the values were integer-like, validate it were actually 0/1's
if (inferred_dtype in integer_like) and not (
np.all(
values[~mask_values].astype(float)
== values_object[~mask_values].astype(float)
)
):
raise TypeError("Need to pass bool-like values")
if mask is None and mask_values is None:
mask = np.zeros(len(values), dtype=bool)
elif mask is None:
mask = mask_values
else:
if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
if mask_values is not None:
mask = mask | mask_values
else:
if copy:
mask = mask.copy()
else:
mask = np.array(mask, dtype=bool)
if mask_values is not None:
mask = mask | mask_values
if values.ndim != 1:
raise ValueError("values must be a 1D list-like")
if mask.ndim != 1:
raise ValueError("mask must be a 1D list-like")
return values, mask
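# --- Illustrative sketch (not part of pandas): expected behaviour of
# coerce_to_array on a small object list. The arrays shown in the comments
# are an inference from the branches above, not an official doctest.
def _coerce_to_array_demo():
    values, mask = coerce_to_array([True, None, False])
    # values -> array([ True, False, False])   (missing slot filled with False)
    # mask   -> array([False,  True, False])   (True marks the missing entry)
    return values, mask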
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. versionadded:: 1.0.0
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
@property
def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
) -> "BooleanArray":
if dtype:
assert dtype == "boolean"
values, mask = coerce_to_array(scalars, copy=copy)
return BooleanArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls, strings: List[str], *, dtype: Optional[Dtype] = None, copy: bool = False
) -> "BooleanArray":
def map_string(s):
if isna(s):
return s
elif s in ["True", "TRUE", "true", "1", "1.0"]:
return True
elif s in ["False", "FALSE", "false", "0", "0.0"]:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
# For BooleanArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (BooleanArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, BooleanArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_bool_dtype(x.dtype):
m = mask.copy()
return BooleanArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if isinstance(result, tuple):
return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def _coerce_to_array(self, value) -> Tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value)
def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with a BooleanDtype, equivalent of same_kind
casting
"""
dtype = pandas_dtype(dtype)
if isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy)
if is_bool_dtype(dtype):
# astype_nansafe converts np.nan to True
if self._hasna:
raise ValueError("cannot convert float NaN to bool")
else:
return self._data.astype(dtype, copy=copy)
# for integer, error if there are missing values
if is_integer_dtype(dtype) and self._hasna:
raise ValueError("cannot convert NA to integer")
# for float dtype, ensure we use np.nan before casting (numpy cannot
# deal with pd.NA)
na_value = self._na_value
if is_float_dtype(dtype):
na_value = np.nan
# coerce
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
data[self._mask] = -1
return data
def any(self, *, skipna: bool = True, **kwargs):
"""
Return whether any element is True.
Returns False unless there is at least one element that is True.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be False, as for an empty array.
If `skipna` is False, the result will still be True if there is
at least one element that is True, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.any : Numpy version of this method.
BooleanArray.all : Return whether all elements are True.
Examples
--------
The result indicates whether any element is True (and by default
skips NAs):
>>> pd.array([True, False, True]).any()
True
>>> pd.array([True, False, pd.NA]).any()
True
>>> pd.array([False, False, pd.NA]).any()
False
>>> pd.array([], dtype="boolean").any()
False
>>> pd.array([pd.NA], dtype="boolean").any()
False
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, False, pd.NA]).any(skipna=False)
True
>>> pd.array([False, False, pd.NA]).any(skipna=False)
<NA>
"""
kwargs.pop("axis", None)
nv.validate_any((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, False)
result = values.any()
if skipna:
return result
else:
if result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def all(self, *, skipna: bool = True, **kwargs):
"""
Return whether all elements are True.
Returns True unless there is at least one element that is False.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be True, as for an empty array.
If `skipna` is False, the result will still be False if there is
at least one element that is False, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.all : Numpy version of this method.
BooleanArray.any : Return whether any element is True.
Examples
--------
The result indicates whether all elements are True (and by default
skips NAs):
>>> pd.array([True, True, pd.NA]).all()
True
>>> pd.array([True, False, pd.NA]).all()
False
>>> pd.array([], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="boolean").all()
True
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, True, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([True, False, pd.NA]).all(skipna=False)
False
"""
kwargs.pop("axis", None)
nv.validate_all((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, True)
result = values.all()
if skipna:
return result
else:
if not result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_booleanarray = isinstance(other, BooleanArray)
other_is_scalar = lib.is_scalar(other)
mask = None
if other_is_booleanarray:
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match to compare")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
elif op.__name__ in {"xor", "rxor"}:
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
return BooleanArray(result, mask)
def _cmp_method(self, other, op):
from pandas.arrays import FloatingArray, IntegerArray
if isinstance(other, (IntegerArray, FloatingArray)):
return NotImplemented
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
result = np.zeros_like(self._data)
mask = np.ones_like(self._data)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
result = op(self._data, other)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask, copy=False)
def _arith_method(self, other, op):
mask = None
op_name = op.__name__
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match")
# nans propagate
if mask is None:
mask = self._mask.copy()
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | mask
if other is libmissing.NA:
# if other is NA, the result will be all NA and we can't run the
# actual op, so we need to choose the resulting dtype manually
if op_name in {"floordiv", "rfloordiv", "mod", "rmod", "pow", "rpow"}:
dtype = "int8"
else:
dtype = "bool"
result = np.zeros(len(self._data), dtype=dtype)
else:
if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
# Avoid DeprecationWarning: In future, it will be an error
# for 'np.bool_' scalars to be interpreted as an index
other = bool(other)
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
return super()._reduce(name, skipna=skipna, **kwargs)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
from pandas.core.arrays import FloatingArray
return FloatingArray(result, mask, copy=False)
elif is_bool_dtype(result):
return BooleanArray(result, mask, copy=False)
elif is_integer_dtype(result):
from pandas.core.arrays import IntegerArray
return IntegerArray(result, mask, copy=False)
else:
result[mask] = np.nan
return result
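# --- Illustrative sketch (not part of pandas): Kleene logic as exposed by
# BooleanArray through the public pd.array constructor. The commented results
# follow the Kleene truth tables referenced in the class docstring.
def _kleene_demo():
    import pandas as pd
    a = pd.array([True, False, pd.NA], dtype="boolean")
    or_na = a | pd.NA    # [True, <NA>, <NA>]  : True | NA is True, False | NA is NA
    and_na = a & pd.NA   # [<NA>, False, <NA>] : True & NA is NA,  False & NA is False
    return or_na, and_na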
| bsd-3-clause |
TuSimple/mxnet | example/multivariate_time_series/src/lstnet.py | 17 | 11583 | # !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
#Todo: Ensure skip connection implementation is correct
import os
import math
import numpy as np
import pandas as pd
import mxnet as mx
import argparse
import logging
import metrics
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Deep neural network for multivariate time series forecasting",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='../data',
help='relative path to input data')
parser.add_argument('--max-records', type=int, default=None,
help='total records before data split')
parser.add_argument('--q', type=int, default=24*7,
help='number of historical measurements included in each training example')
parser.add_argument('--horizon', type=int, default=3,
help='number of measurements ahead to predict')
parser.add_argument('--splits', type=str, default="0.6,0.2",
help='fraction of data to use for train & validation. remainder used for test.')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--filter-list', type=str, default="6,12,18",
help='unique filter sizes')
parser.add_argument('--num-filters', type=int, default=100,
help='number of each filter size')
parser.add_argument('--recurrent-state-size', type=int, default=100,
help='number of hidden units in each unrolled recurrent cell')
parser.add_argument('--seasonal-period', type=int, default=24,
help='time between seasonal measurements')
parser.add_argument('--time-interval', type=int, default=1,
help='time between each measurement')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ')
parser.add_argument('--optimizer', type=str, default='adam',
help='the optimizer type')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout rate for network')
parser.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
parser.add_argument('--save-period', type=int, default=20,
help='save checkpoint for every n epochs')
parser.add_argument('--model_prefix', type=str, default='electricity_model',
help='prefix for saving model params')
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
"""
Load & generate training examples from multivariate time series data
:return: data iters & variables required to define network architecture
"""
# Read in data as numpy array
df = pd.read_csv(os.path.join(data_dir, "electricity.txt"), sep=",", header=None)
feature_df = df.iloc[:, :].astype(float)
x = feature_df.as_matrix()
x = x[:max_records] if max_records else x
# Construct training examples based on horizon and window
x_ts = np.zeros((x.shape[0] - q, q, x.shape[1]))
y_ts = np.zeros((x.shape[0] - q, x.shape[1]))
for n in range(x.shape[0]):
if n + 1 < q:
continue
elif n + 1 + horizon > x.shape[0]:
continue
else:
y_n = x[n + horizon, :]
x_n = x[n + 1 - q:n + 1, :]
x_ts[n-q] = x_n
y_ts[n-q] = y_n
# Split into training and testing data
training_examples = int(x_ts.shape[0] * splits[0])
valid_examples = int(x_ts.shape[0] * splits[1])
x_train, y_train = x_ts[:training_examples], \
y_ts[:training_examples]
x_valid, y_valid = x_ts[training_examples:training_examples + valid_examples], \
y_ts[training_examples:training_examples + valid_examples]
x_test, y_test = x_ts[training_examples + valid_examples:], \
y_ts[training_examples + valid_examples:]
#build iterators to feed batches to network
train_iter = mx.io.NDArrayIter(data=x_train,
label=y_train,
batch_size=batch_size)
val_iter = mx.io.NDArrayIter(data=x_valid,
label=y_valid,
batch_size=batch_size)
test_iter = mx.io.NDArrayIter(data=x_test,
label=y_test,
batch_size=batch_size)
return train_iter, val_iter, test_iter
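# --- Illustrative sketch (not part of the original example): how the windowing
# loop in build_iters pairs inputs with targets, shown on a toy univariate
# series. The helper name and the values q=3, horizon=1 are assumptions made
# only for this illustration.
def _window_demo():
    x = np.arange(6).reshape(6, 1)        # measurements x0 .. x5
    q, horizon = 3, 1
    pairs = []
    for n in range(x.shape[0]):
        if n + 1 < q or n + 1 + horizon > x.shape[0]:
            continue
        pairs.append((x[n + 1 - q:n + 1, 0].tolist(), int(x[n + horizon, 0])))
    return pairs   # -> [([0, 1, 2], 3), ([1, 2, 3], 4), ([2, 3, 4], 5)]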
def sym_gen(train_iter, q, filter_list, num_filter, dropout, rcells, skiprcells, seasonal_period, time_interval):
input_feature_shape = train_iter.provide_data[0][1]
X = mx.symbol.Variable(train_iter.provide_data[0].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
# reshape data before applying convolutional layer (takes 4D shape incase you ever work with images)
conv_input = mx.sym.reshape(data=X, shape=(0, 1, q, -1))
###############
# CNN Component
###############
outputs = []
for i, filter_size in enumerate(filter_list):
# pad input array to ensure number output rows = number input rows after applying kernel
padi = mx.sym.pad(data=conv_input, mode="constant", constant_value=0,
pad_width=(0, 0, 0, 0, filter_size - 1, 0, 0, 0))
convi = mx.sym.Convolution(data=padi, kernel=(filter_size, input_feature_shape[2]), num_filter=num_filter)
acti = mx.sym.Activation(data=convi, act_type='relu')
trans = mx.sym.reshape(mx.sym.transpose(data=acti, axes=(0, 2, 1, 3)), shape=(0, 0, 0))
outputs.append(trans)
cnn_features = mx.sym.Concat(*outputs, dim=2)
cnn_reg_features = mx.sym.Dropout(cnn_features, p=dropout)
###############
# RNN Component
###############
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(rcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
rnn_features = outputs[-1] #only take value from final unrolled cell for use later
####################
# Skip-RNN Component
####################
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(skiprcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
# Take output from cells p steps apart
p = int(seasonal_period / time_interval)
output_indices = list(range(0, q, p))
outputs.reverse()
skip_outputs = [outputs[i] for i in output_indices]
skip_rnn_features = mx.sym.concat(*skip_outputs, dim=1)
##########################
# Autoregressive Component
##########################
auto_list = []
for i in list(range(input_feature_shape[2])):
time_series = mx.sym.slice_axis(data=X, axis=2, begin=i, end=i+1)
fc_ts = mx.sym.FullyConnected(data=time_series, num_hidden=1)
auto_list.append(fc_ts)
ar_output = mx.sym.concat(*auto_list, dim=1)
######################
# Prediction Component
######################
neural_components = mx.sym.concat(*[rnn_features, skip_rnn_features], dim=1)
neural_output = mx.sym.FullyConnected(data=neural_components, num_hidden=input_feature_shape[2])
model_output = neural_output + ar_output
loss_grad = mx.sym.LinearRegressionOutput(data=model_output, label=Y)
return loss_grad, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
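# --- Illustrative sketch (not part of the original example): which unrolled
# time steps the skip-RNN keeps. After reversing the outputs, indices
# 0, p, 2p, ... correspond to the final step and every p-th step before it.
# The values q=8, p=3 below are assumptions used only for illustration.
def _skip_indices_demo(q=8, p=3):
    steps = list(range(q))    # unrolled steps 0 .. q-1
    steps.reverse()           # mirrors `outputs.reverse()` above
    return [steps[i] for i in range(0, q, p)]   # -> [7, 4, 1]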
def train(symbol, train_iter, valid_iter, data_names, label_names):
devs = mx.cpu() if args.gpus is None or args.gpus == '' else [mx.gpu(int(i)) for i in args.gpus.split(',')]
module = mx.mod.Module(symbol, data_names=data_names, label_names=label_names, context=devs)
module.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
module.init_params(mx.initializer.Uniform(0.1))
module.init_optimizer(optimizer=args.optimizer, optimizer_params={'learning_rate': args.lr})
for epoch in range(1, args.num_epochs+1):
train_iter.reset()
val_iter.reset()
for batch in train_iter:
module.forward(batch, is_train=True) # compute predictions
module.backward() # compute gradients
module.update() # update parameters
train_pred = module.predict(train_iter).asnumpy()
train_label = train_iter.label[0][1].asnumpy()
print('\nMetrics: Epoch %d, Training %s' % (epoch, metrics.evaluate(train_pred, train_label)))
val_pred = module.predict(val_iter).asnumpy()
val_label = val_iter.label[0][1].asnumpy()
print('Metrics: Epoch %d, Validation %s' % (epoch, metrics.evaluate(val_pred, val_label)))
if epoch % args.save_period == 0 and epoch > 1:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if epoch == args.num_epochs:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if __name__ == '__main__':
# parse args
args = parser.parse_args()
args.splits = list(map(float, args.splits.split(',')))
args.filter_list = list(map(int, args.filter_list.split(',')))
# Check valid args
if not max(args.filter_list) <= args.q:
raise AssertionError("no filter can be larger than q")
if not args.q >= math.ceil(args.seasonal_period / args.time_interval):
raise AssertionError("size of skip connections cannot exceed q")
# Build data iterators
train_iter, val_iter, test_iter = build_iters(args.data_dir, args.max_records, args.q, args.horizon, args.splits, args.batch_size)
# Choose cells for recurrent layers: each cell will take the output of the previous cell in the list
rcells = [mx.rnn.GRUCell(num_hidden=args.recurrent_state_size)]
skiprcells = [mx.rnn.LSTMCell(num_hidden=args.recurrent_state_size)]
# Define network symbol
symbol, data_names, label_names = sym_gen(train_iter, args.q, args.filter_list, args.num_filters,
args.dropout, rcells, skiprcells, args.seasonal_period, args.time_interval)
# train cnn model
train(symbol, train_iter, val_iter, data_names, label_names)
| apache-2.0 |
tapomayukh/projects_in_python | rapid_categorization/haptic_map/NIDRR/Demo_April_2014/collect_data_taxel_based_nidrr.py | 1 | 9797 | #!/usr/bin/env python
# Collect PR2 sleeve data for NIDRR
import math, numpy as np
import matplotlib.pyplot as pp
import scipy.linalg as lin
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
import os
import optparse
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray as TaxelArray_Meka
import sys
def callback(data, callback_args):
#rospy.loginfo('Getting data!')
# Fixing Transforms
tf_lstnr = callback_args
sc = SkinContact()
sc.header.frame_id = '/odom_combined' #'/torso_lift_link' # has to be this and no other coord frame.
sc.header.stamp = data.header.stamp
try:
tf_lstnr.waitForTransform(sc.header.frame_id, data.header.frame_id, rospy.Time(0), rospy.Duration(40.0))
t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
data.header.frame_id,
rospy.Time(0))
t1 = np.matrix(t1).reshape(3,1)
r1 = tr.quaternion_to_matrix(q1)
# Gathering Force Data
force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z])
fmags_instant = ut.norm(force_vectors)
threshold = 0.01
fmags_tuned = fmags_instant - threshold
fmags_tuned[np.where(fmags_tuned<0)]=0
fmags_instant_tuned = fmags_tuned
global fmags
for i in range(len(fmags_instant_tuned)):
fmags[i].append(fmags_instant_tuned[i])
# Gathering Contact Data for Haptic Mapping
global global_contact_vector
for i in range(len(fmags_instant_tuned)):
global_contact_vector[i].append(r1*((np.column_stack([data.centers_x[i], data.centers_y[i], data.centers_z[i]])).T) + t1)
except (tf.Exception, tf.LookupException, tf.ConnectivityException):
pass
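# --- Illustrative sketch (not part of the original node): the soft threshold
# applied to force magnitudes in callback(). Readings below the 0.01 threshold
# are zeroed, readings above it are reduced by the threshold (toy values).
def _threshold_demo():
    fmags_instant = np.array([0.005, 0.02, 0.10])
    tuned = fmags_instant - 0.01
    tuned[np.where(tuned < 0)] = 0
    return tuned   # -> array([0.  , 0.01, 0.09])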
def processdata():
global sleeve
str_process = 'Processing data for ' + sleeve + '!'
rospy.loginfo(str_process)
global fmags
global global_contact_vector
for key in fmags:
temp_force_store = []
temp_contact_motion = []
init_contact = 0
init_contact_store = 0.0
max_temp = max(fmags[key])
#if max_temp > 0.0:
#print key
#print '#####'
for i in range(len(fmags[key])):
if fmags[key][i] > 0.0:
init_contact = init_contact + 1
temp_force_store.append(fmags[key][i])
if init_contact == 1:
#print "Started Contact !"
init_contact_store = global_contact_vector[key][i]
temp_contact_motion.append(0.0)
else:
temp_contact_motion.append(abs(lin.norm(global_contact_vector[key][i] - init_contact_store)))
else:
if len(temp_force_store) > 0:
#print "Broke Contact !"
savedata(temp_force_store, temp_contact_motion)
temp_force_store = []
temp_contact_motion = []
init_contact = 0
init_contact_store = 0.0
def savedata(force, motion):
global trial_index
global directory
global sleeve
time = []
contact_area = []
if len(force) > 10:
str_save = 'Saving data for ' + sleeve + '!'
rospy.loginfo(str_save)
time_len = len(force)
while len(time) < time_len:
if len(time) == 0:
time.append(0.0)
contact_area.append(1.0)
else:
time.append(time[len(time)-1] + 0.01)
contact_area.append(1.0)
if not os.path.exists(directory):
os.makedirs(directory)
ut.save_pickle([time, force, contact_area, motion], directory + '/trial_' + sleeve + np.str(trial_index) +'.pkl')
trial_index = trial_index + 1
#else:
#print "Too few samples, Not saving the data"
def plotdata():
rospy.loginfo('Plotting data!')
global directory
global trial_index
global sleeve
for trial_num in range(1, trial_index):
ta = ut.load_pickle(directory + '/trial_' + sleeve + np.str(trial_num) +'.pkl')
mpu.figure(3*trial_num-2)
pp.title('Time-Varying Force')
pp.xlabel('Time (s)')
pp.ylabel('Max Force')
pp.plot(ta[0], ta[1])
pp.grid('on')
mpu.figure(3*trial_num-1)
pp.title('Time-Varying Contact')
pp.xlabel('Time (s)')
pp.ylabel('No. of Contact Regions')
pp.plot(ta[0], ta[2])
pp.grid('on')
mpu.figure(3*trial_num)
pp.title('Point Tracker')
pp.xlabel('Time (s)')
pp.ylabel('Contact Point Distance')
pp.plot(ta[0], ta[3])
pp.grid('on')
def getdata(node_name, topic_subscriber, sleeve):
str_init = 'Initializing the Node for ' + sleeve + '!'
str_sub = 'Waiting to Subscribe to the Skin Message for ' + sleeve
rospy.loginfo(str_init)
rospy.init_node(node_name, anonymous=True)
tf_lstnr = tf.TransformListener()
rospy.loginfo(str_sub)
rospy.Subscriber(topic_subscriber, TaxelArray_Meka, callback, callback_args = (tf_lstnr))
rospy.spin()
if __name__ == '__main__':
p = optparse.OptionParser()
p.add_option('--upperarm', action='store_true', dest='upperarm', help='node for the upperarm taxels of PR2 sleeve')
p.add_option('--forearm', action='store_true', dest='forearm', help='node for the forearm taxels of PR2 sleeve')
p.add_option('--gripper_left_link', action='store_true', dest='gripper_left_link', help='node for the gripper left link taxels of PR2 sleeve')
p.add_option('--gripper_right_link', action='store_true', dest='gripper_right_link', help='node for the gripper right link taxels of PR2 sleeve')
p.add_option('--gripper_palm', action='store_true', dest='gripper_palm', help='node for the gripper palm taxels of PR2 sleeve')
p.add_option('--fingertip_left', action='store_true', dest='fingertip_left', help='node for the fingertip left taxels of PR2 pps')
p.add_option('--fingertip_right', action='store_true', dest='fingertip_right', help='node for the fingertip right taxels of PR2 pps')
p.add_option('--exp_num', action='store', dest='exp_num', default = None, help='specify the experiment number', type='float')
opt, args = p.parse_args()
if opt.upperarm and opt.exp_num:
node_name = "PR2_upperarm_collect_data"
topic_subscriber = "/pr2_fabric_upperarm_sensor/taxels/forces"
topic_publisher = 'demo_upperarm_taxelarray_with_cost'
num_taxels = 4
sleeve = 'upperarm'
num = opt.exp_num
elif opt.forearm and opt.exp_num:
node_name = "PR2_forearm_collect_data"
topic_subscriber = "/pr2_fabric_forearm_sensor/taxels/forces"
topic_publisher = 'demo_forearm_taxelarray_with_cost'
num_taxels = 22
sleeve = 'forearm'
num = opt.exp_num
elif opt.gripper_left_link and opt.exp_num:
node_name = "PR2_gripper_left_link_collect_data"
topic_subscriber = "/pr2_fabric_gripper_left_link_sensor/taxels/forces"
topic_publisher = 'demo_gripper_left_link_taxelarray_with_cost'
num_taxels = 4
sleeve = 'gripper_left_link'
num = opt.exp_num
elif opt.gripper_right_link and opt.exp_num:
node_name = "PR2_gripper_right_link_collect_data"
topic_subscriber = "/pr2_fabric_gripper_right_link_sensor/taxels/forces"
topic_publisher = 'demo_gripper_right_link_taxelarray_with_cost'
num_taxels = 4
sleeve = 'gripper_right_link'
num = opt.exp_num
elif opt.gripper_palm and opt.exp_num:
node_name = "PR2_gripper_palm_collect_data"
topic_subscriber = "/pr2_fabric_gripper_palm_sensor/taxels/forces"
topic_publisher = 'demo_gripper_palm_taxelarray_with_cost'
num_taxels = 2
sleeve = 'gripper_palm'
num = opt.exp_num
elif opt.fingertip_left and opt.exp_num:
node_name = "PR2_fingertip_left_collect_data"
topic_subscriber = "/pr2_pps_left_sensor/taxels/forces"
topic_publisher = 'demo_fingertip_left_taxelarray_with_cost'
num_taxels = 3
sleeve = 'fingertip_left'
num = opt.exp_num
elif opt.fingertip_right and opt.exp_num:
node_name = "PR2_fingertip_right_collect_data"
topic_subscriber = "/pr2_pps_right_sensor/taxels/forces"
topic_publisher = 'demo_fingertip_right_taxelarray_with_cost'
num_taxels = 3
sleeve = 'fingertip_right'
num = opt.exp_num
else:
rospy.logerr('Specify --exp_num and --upperarm or --forearm or --gripper_left_link or --gripper_right_link or --gripper_palm or --fingertip_left or --fingertip_right')
sys.exit()
# Global Params
#obj_class = 'human/'
obj_class = 'furniture/'
#obj_class = 'mannequin/'
trial_index = 1
directory = '/home/nidrr/svn/robot1_data/usr/tapo/data/rapid_categorization/Taxel_Based/NIDRR_demo_data/' + obj_class + np.str(int(num))
# Global Data dicts
fmags = {}
for i in range(num_taxels):
fmags[i] = []
global_contact_vector = {}
for i in range(num_taxels):
global_contact_vector[i] = []
# Function Calls
getdata(node_name, topic_subscriber, sleeve)
processdata()
#plotdata()
#pp.show()
| mit |
nagyistoce/deep_nets_iclr04 | lib/layer_blocks.py | 3 | 17537 | import sys, os
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import re
import time
import cPickle as pickle
from types import *
import numpy
import math
import matplotlib.pyplot as plt
from data_handling import DataSlidingWindow
import tools
import warnings
warnings.filterwarnings("ignore")
rng = numpy.random.RandomState(23455) #constant random seed: such that experiments are repeatable
class Layer(object):
instance_count = 0
def __init__(self, layerxml, params):
Layer.instance_count += 1
if layerxml.find('id') != None:
self.id = layerxml.find('id').text
else:
self.id = str(Layer.instance_count)
self.layer_no = Layer.instance_count
self.log = False
self.representative_image = None
self.load_weights_while_train = False
if layerxml.find('load_weights') != None and layerxml.find('load_weights').text == 'True':
self.load_weights_while_train = True
self.load_weights = False
if params.load_weights or (layerxml.find('load_weights') != None and layerxml.find('load_weights').text == 'True'):
self.load_weights = True
self.log = False
if layerxml.find('log') is not None:
self.log = layerxml.find('log').text == 'True'
self.weight_update = True
if layerxml.find('weight_update') is not None:
self.weight_update = layerxml.find('weight_update').text == 'True'
print 'Weight update for layer {0:s} is {1:s}'.format(self.id, str(self.weight_update))
# The default output and input size is 'empty'.
# - All sub-classes are expected to set the output size to something
# that is not empty in their __init__ method.
# - The convention is that the 0th dimension is the batch size.
self.out_size = (0) # Default output size is 'empty'
self.in_size = (0) # Default output size is 'empty'
class Weight(object):
def __init__(self, w_shape, load_weights, weights_dir, bound, name, epoch):
super(Weight, self).__init__()
self.name = name
if not load_weights:
self.np_values = numpy.asarray(bound * rng.standard_normal(w_shape), dtype=theano.config.floatX)
else:
self.load_weight(weights_dir, epoch)
if type(w_shape) == IntType:
self.np_values = self.np_values + math.fabs(self.np_values.min())
self.val = theano.shared(value=self.np_values, name=name)
def save_weight(self, dir, epoch = 0):
print '- Saved weight: ' + self.name + '_ep'+str(epoch)
numpy.save(dir + self.name + '_ep'+str(epoch), self.val.get_value())
def load_weight(self, dir, epoch = 0):
print '- Loaded weight: ' + self.name + '_ep'+str(epoch)
self.np_values = numpy.load(dir + self.name + '_ep'+str(epoch) + '.npy')
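# --- Illustrative sketch (not part of the original library): how Weight draws
# fresh parameters when no stored weights are loaded. Matrix weights come from
# bound * N(0, 1); a scalar shape (the bias case) is additionally shifted so
# that no entry is negative. The shapes and bound below are assumptions.
def _weight_init_demo():
    w = numpy.asarray(0.1 * rng.standard_normal((3, 2)), dtype=theano.config.floatX)
    b = numpy.asarray(0.1 * rng.standard_normal(2), dtype=theano.config.floatX)
    b = b + math.fabs(b.min())   # shift so the smallest bias entry is non-negative
    return w, b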
class SoftMaxLayer(Layer):
type = 'SoftMaxLayer'
def __init__(self, layerxml, params, prev_layer = None):
super(SoftMaxLayer, self).__init__(layerxml, params)
if prev_layer.type != "OutputLayer" and prev_layer.type != "DropoutLayer":
raise NotImplementedError()
if len(prev_layer.out_size) != 2:
raise NotImplementedError()
self.in_size = (params.batch_size, prev_layer.out_size[-1])
self.out_size = (params.batch_size, 2)
n_in = self.in_size[-1]
n_out = self.out_size[-1]
print 'No of input units: ' + str(n_in)
print 'No of output units: ' + str(n_out)
W_bound = 0.00000000 #numpy.sqrt(6. / (n_in + n_out))
self.W = Weight((n_in, n_out), self.load_weights, params.weights_dir, \
W_bound, 'W_' + str(self.id), params.epoch_no)
self.b = Weight(n_out, self.load_weights, params.weights_dir, \
W_bound, 'b_' + str(self.id), params.epoch_no)
def compute(self, input, params):
input = input.flatten(2)
self.input = input
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W.val) + self.b.val)
self.output = T.argmax(self.p_y_given_x, axis=1)
# confidence of the label
self.confidence = self.p_y_given_x[T.arange(self.p_y_given_x.shape[0]), self.output]
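# e.g. with p_y_given_x = [[0.8, 0.2], [0.3, 0.7]] the predicted labels are
# [0, 1] and the row-wise gather above returns their probabilities [0.8, 0.7].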
self.params = [self.W.val, self.b.val]
def write_log(self, params, get_output_layer, epoch, iter):
# write layer1 output
op_out = numpy.asarray([get_output_layer(0)]).T
l0name = '{0:s}/SofMax-test-epoch{1:04d}-iter{2:04d}-'.format(params.op_dir, epoch, iter) + '{0:d}.png'
tools.write_image_multiple(op_out, (1,1), params.pnt_nos, l0name)
class OutputLayer(Layer):
type = 'OutputLayer'
def __init__(self, layerxml, params, prev_layer = None):
super(OutputLayer, self).__init__(layerxml, params)
self.in_size = prev_layer.out_size
self.out_size = (params.batch_size, int(layerxml.find('no_states').text))
n_in = numpy.prod(prev_layer.out_size[1:])
n_out = self.out_size[-1]
print 'No of input units: ' + str(n_in)
print 'No of output units: ' + str(n_out)
W_bound = 1.0/numpy.sqrt(n_in)
if layerxml.find('activation') is not None:
if layerxml.find('activation').text == 'tanh':
self.activation = T.tanh
elif layerxml.find('activation').text == 'relu':
self.activation = self.relu
else:
self.activation = T.nnet.sigmoid
W_bound *= 4
self.W = Weight((n_in, n_out), self.load_weights, params.weights_dir, W_bound, 'W_' + str(self.id), params.epoch_no)
self.b = Weight(n_out, self.load_weights, params.weights_dir, W_bound, 'b_' + str(self.id), params.epoch_no)
def relu(self, x):
return T.maximum(x, 0)
def compute(self, input, params):
input = input.flatten(2)
self.input = input
lin_output = T.dot(self.input, self.W.val) + self.b.val
self.output = self.activation(lin_output)
#self.output = T.nnet.sigmoid(lin_output)
# parameters of the model
self.params = [self.W.val, self.b.val]
def write_log(self, params, get_output_layer, epoch, iter):
# write layer1 output
op_out = get_output_layer(0)
l0name = '{0:s}/Output-test-epoch{1:04d}-iter{2:04d}-'.format(params.op_dir, epoch, iter) + '{0:d}.png'
#tools.write_image_multiple(op_out, params.imshape[:2], params.pnt_nos, l0name)
#self.representative_image = l0name
tools.write_image_multiple(op_out, (1,1), params.pnt_nos, l0name)
class DropoutLayer(Layer):
type = 'DropoutLayer'
# Each layer instance needs its own seed, so draw from this srng to get the
# seeds for each layer.
__dropout_seed_srng = numpy.random.RandomState(0)
# In order to turn dropout off at test time we need to keep track of the
# probability of all our dropout layers that have been instantiated.
dropout_layers = []
def __init__(self, layerxml, params, prev_layer = None):
super(DropoutLayer, self).__init__(layerxml, params)
DropoutLayer.dropout_layers.append(self) # TODO: Is this threadsafe?
# TODO: Make dropout probability programmable (ie, use a shared variable)
self.prob = float(layerxml.find('prob').text)
print 'Dropout Probability: ' + str(self.prob)
self.in_size = prev_layer.out_size
self.out_size = self.in_size
def compute(self, input, params):
# We need to be able to turn on and off the dropout (on for training,
# off for testing). Therefore use a shared variable to control
# the current dropout state. Start in "ON" state by default.
self.dropout_on = theano.shared(numpy.cast[theano.config.floatX](1.0), \
borrow=True)
# Create a random stream to generate a random mask of 0 and 1
# activations.
seed = DropoutLayer.__dropout_seed_srng.randint(0, sys.maxint)
srng = theano.tensor.shared_randomstreams.RandomStreams(seed)
# p=1-p because 1's indicate keep and p is prob of dropping
self.mask = srng.binomial(n=1, p=1.0 - self.prob, size=input.shape)
# When dropout is off, activations must be multiplied by the average
# on probability (ie 1 - p)
off_gain = (1.0 - self.prob)
# The cast in the following expression is important because:
# int * float32 = float64 which pulls things off the gpu
self.output = input * self.dropout_on * T.cast(self.mask, theano.config.floatX) + \
off_gain * input * (1.0 - self.dropout_on)
# Static method to turn off dropout for all DropoutLayer instances
# When training set training to True, otherwise False
@staticmethod
def SetDropoutOn(training):
if training:
dropout_on = 1.0
else:
dropout_on = 0.0
for i in range(0, len(DropoutLayer.dropout_layers)):
DropoutLayer.dropout_layers[i].dropout_on.set_value(dropout_on)
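# --- Illustrative sketch (not part of the original library): the intended
# effect of DropoutLayer.compute for prob=0.5. While training, a random 0/1
# mask multiplies the input; at test time the mask is bypassed and the input
# is scaled by (1 - prob), keeping expected activations comparable.
def _dropout_scaling_demo(prob=0.5):
    x = numpy.ones(4, dtype=theano.config.floatX)
    mask = numpy.array([1, 0, 1, 0], dtype=theano.config.floatX)   # one example draw
    train_out = x * mask            # -> [1, 0, 1, 0]
    test_out = (1.0 - prob) * x     # -> [0.5, 0.5, 0.5, 0.5]
    return train_out, test_out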
class InputLayerSW(Layer):
type = 'InputLayerSW'
def __init__(self, layerxml, params, prev_layer = None, windowshape = None):
super(InputLayerSW, self).__init__(layerxml, params)
# InputLayerSW must be the first layer
if prev_layer != None:
raise NotImplementedError()
self.data_info = []
for d in layerxml.findall('data'):
data_dir = d.find('dir').text + '/'
self.data_info.append((data_dir))
self.prepro_type = layerxml.find('preprocessing').text
self.windowshape = windowshape
if self.windowshape == None:
#TODO: Parse from layer_xml, not hardcode
self.windowshape = (int(layerxml.find('windowshape/h').text),
int(layerxml.find('windowshape/w').text),
int(layerxml.find('windowshape/d').text))
if not params.load_weights:
self.data = DataSlidingWindow(points=params.pnt_nos)
self.data.load_picked_data(params.shared_op_dir)
for dir in self.data_info:
print 'Adding data from: ' + dir
self.data.add_to_dataset(dir)
self.data.load_data(self.windowshape, shuffle=False)
self.data.save_picked_data(params.shared_op_dir)
params.imshape = self.windowshape
self.out_size = (params.batch_size, params.imshape[2], params.imshape[0], params.imshape[1])
# TODO: Is this in_size correct?
self.in_size = (params.batch_size, params.imshape[2], params.imshape[0], params.imshape[1])
print("InputLayerSW out_size: " + str(self.out_size))
def compute(self, input, params):
self.output = input.reshape((params.batch_size, params.imshape[2], params.imshape[0], params.imshape[1]))
def write_log(self, params):
# TODO: Fix this (I assume the data format was changed at some point)
#tools.write_image(self.data.train_set_x.get_value(), (self.windowshape[0],self.windowshape[1], self.windowshape[2]), params.op_dir + 'train_x_before_preprocess.png')
#tools.write_image_multiple(self.data.train_set_y.get_value(), (1,1), params.pnt_nos, params.op_dir + 'train_y_slice-{0:d}.png')
#tools.write_image(self.data.valid_set_x.get_value(), (self.windowshape[0],self.windowshape[1], self.windowshape[2]), params.op_dir + 'valid_x_before_preprocess.png')
#tools.write_image_multiple(self.data.valid_set_y.get_value(), (1,1), params.pnt_nos, params.op_dir + 'valid_y_slice-{0:d}.png')
if len(self.data.X_names['test']) > 0:
print 'Data format changed'
#tools.write_image(self.data.X_SW_p['test'], (self.windowshape[0],self.windowshape[1], self.windowshape[2]), params.op_dir + 'X_SW_p.png')
#tools.write_image(self.data.X_SW_p['test'], (self.windowshape[0],self.windowshape[1], self.windowshape[2]), params.op_dir + 'X_SW_n.png')
#tools.write_image_multiple(self.data.test_set_y.get_value(), (1,1), params.pnt_nos, params.op_dir + 'test_set_y_slice-{0:d}.png')
class ConvPoolLayer(Layer):
type = 'ConvPoolLayer'
def __init__(self, layerxml, params, prev_layer = None):
super(ConvPoolLayer, self).__init__(layerxml, params)
self.nkerns = int(layerxml.find('nos_filters').text)
self.filter_size = int(layerxml.find('filter_size').text)
self.pool_size = int(layerxml.find('pool_size').text)
self.log_output = bool(layerxml.find('log_output').text)
# The input type to the ConvPoolLayer is restricted (we expect the input
# to be an image, so only layers that have image inputs are allowed).
if prev_layer.type != "ConvPoolLayer" and \
prev_layer.type != "InputLayerSW" and \
prev_layer.type != "DropoutLayer":
raise NotImplementedError()
# DropoutLayer is a special case. It's output can be either 4D or 2D,
# depending on what came before it. Make sure this is correct.
if len(prev_layer.out_size) != 4:
raise NotImplementedError()
poolsize=(self.pool_size, self.pool_size)
self.in_size = prev_layer.out_size
self.out_size = (params.batch_size, self.nkerns, self.in_size[2]/poolsize[0], self.in_size[3]/poolsize[1])
print("ConvPoolLayer in_size: " + str(self.in_size))
print("ConvPoolLayer out_size: " + str(self.out_size))
# Filter shape is (#n_output_feats, #n_input_feats, filter_size, filter_size)
self.filter_shape = (self.nkerns, self.in_size[1], self.filter_size, self.filter_size)
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(self.filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (self.filter_shape[0] * numpy.prod(self.filter_shape[2:]) / numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(1. / (fan_in))
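# e.g. a 5x5 filter over 4 input feature maps gives fan_in = 4*5*5 = 100,
# so W_bound = sqrt(1/100) = 0.1 (illustrative numbers, not from a config).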
self.W = Weight(self.filter_shape, self.load_weights, params.weights_dir, W_bound, 'W_' + str(self.id), params.epoch_no)
self.b = Weight(self.filter_shape[0], self.load_weights, params.weights_dir, W_bound, 'b_' + str(self.id), params.epoch_no)
def compute(self, input, params):
poolsize=(self.pool_size, self.pool_size)
# convolve input feature maps with filters
conv_out = conv.conv2d(input=input, filters=self.W.val, image_shape=self.in_size, border_mode='full')
mid = numpy.asarray(numpy.floor(numpy.asarray(self.filter_shape[2:]) / 2.), dtype=int)
conv_out = conv_out[:, :, mid[0]:-mid[0], mid[1]:-mid[1]]
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(input=conv_out,
ds=poolsize, ignore_border=True)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = pooled_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
self.output = T.maximum(self.output, 0)  # ReLU; alternatives tried: T.nnet.sigmoid(self.output), T.tanh(self.output)
# store parameters of this layer
self.params = []
if self.weight_update:
self.params = [self.W.val, self.b.val]
def write_log(self, params, get_output_layer, epoch, iter):
# write filters
figname = '{0:s}/Filters-id{1:d}-epoch-{2:04d}-iter{3:04d}.png'.format(params.op_dir, self.layer_no-1, epoch, iter)
filter_img = numpy.reshape(self.W.val.get_value()[:, 0, :, :], (self.nkerns, self.filter_size * self.filter_size))
tools.write_image(filter_img, (self.filter_size, self.filter_size, 1), figname)
self.representative_image = figname
if self.log_output:
# write conv-pool layer output
idx_in_minibatch = 0;
convname = '{0:s}/ConvOut-test-id{1:d}-epoch-{2:04d}iter{3:04d}.png'.format(params.op_dir, self.layer_no-1, epoch, iter)
conv_out = get_output_layer(idx_in_minibatch)
convpool_out_img = numpy.reshape(conv_out[0, :, :, :], (self.nkerns, conv_out.shape[2] * conv_out.shape[3]))
after_pooling_imgshape = (conv_out.shape[2], conv_out.shape[3], 1)
tools.write_image(convpool_out_img, after_pooling_imgshape, convname)
| gpl-2.0 |
moberweger/deep-prior | src/main_nyu_posereg_embedding.py | 1 | 7776 | """This is the main file for training hand joint classifier on NYU dataset
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <oberweger@icg.tugraz.at>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import matplotlib
matplotlib.use('Agg') # plot to file
import matplotlib.pyplot as plt
import theano
import os
import cPickle
import sys
from sklearn.decomposition import PCA
from trainer.poseregnettrainer import PoseRegNetTrainer, PoseRegNetTrainerParams
from net.poseregnet import PoseRegNetParams, PoseRegNet
from data.importers import NYUImporter
from data.dataset import NYUDataset
from util.handpose_evaluation import NYUHandposeEvaluation
from data.transformations import transformPoint2D
from net.hiddenlayer import HiddenLayer, HiddenLayerParams
if __name__ == '__main__':
eval_prefix = 'NYU_EMB_t0nF8mp421fD553h1024_PCA30'
if not os.path.exists('./eval/'+eval_prefix+'/'):
os.makedirs('./eval/'+eval_prefix+'/')
floatX = theano.config.floatX # @UndefinedVariable
rng = numpy.random.RandomState(23455)
print("create data")
di = NYUImporter('../data/NYU/')
Seq1 = di.loadSequence('train', shuffle=True, rng=rng)
trainSeqs = [Seq1]
Seq2_1 = di.loadSequence('test_1')
Seq2_2 = di.loadSequence('test_2')
testSeqs = [Seq2_1, Seq2_2]
# create training data
trainDataSet = NYUDataset(trainSeqs)
train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
mb = (train_data.nbytes) / (1024 * 1024)
print("data size: {}Mb".format(mb))
valDataSet = NYUDataset(testSeqs)
val_data, val_gt3D = valDataSet.imgStackDepthOnly('test_1')
testDataSet = NYUDataset(testSeqs)
test_data1, test_gt3D1 = testDataSet.imgStackDepthOnly('test_1')
test_data2, test_gt3D2 = testDataSet.imgStackDepthOnly('test_2')
print train_gt3D.max(), test_gt3D1.max(), train_gt3D.min(), test_gt3D1.min()
print train_data.max(), test_data1.max(), train_data.min(), test_data1.min()
imgSizeW = train_data.shape[3]
imgSizeH = train_data.shape[2]
nChannels = train_data.shape[1]
####################################
# convert data to embedding
pca = PCA(n_components=30)
pca.fit(train_gt3D.reshape((train_gt3D.shape[0], train_gt3D.shape[1]*3)))
train_gt3D_embed = pca.transform(train_gt3D.reshape((train_gt3D.shape[0], train_gt3D.shape[1]*3)))
test_gt3D_embed1 = pca.transform(test_gt3D1.reshape((test_gt3D1.shape[0], test_gt3D1.shape[1]*3)))
test_gt3D_embed2 = pca.transform(test_gt3D2.reshape((test_gt3D2.shape[0], test_gt3D2.shape[1]*3)))
val_gt3D_embed = pca.transform(val_gt3D.reshape((val_gt3D.shape[0], val_gt3D.shape[1]*3)))
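    # Sketch of the embedding (assuming standard scikit-learn PCA, whiten=False):
    #   embed = (pose - pca.mean_).dot(pca.components_.T)   # 3*J dims -> 30 dims
    #   pose ~= embed.dot(pca.components_) + pca.mean_      # back-projection
    # The same linear back-projection is later wired into the network as an
    # extra layer with W = pca.components_ and b = pca.mean_.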
############################################################################
print("create network")
batchSize = 128
poseNetParams = PoseRegNetParams(type=0, nChan=nChannels, wIn=imgSizeW, hIn=imgSizeH, batchSize=batchSize, numJoints=1, nDims=train_gt3D_embed.shape[1])
poseNet = PoseRegNet(rng, cfgParams=poseNetParams)
poseNetTrainerParams = PoseRegNetTrainerParams()
poseNetTrainerParams.batch_size = batchSize
poseNetTrainerParams.learning_rate = 0.01
print("setup trainer")
poseNetTrainer = PoseRegNetTrainer(poseNet, poseNetTrainerParams, rng)
poseNetTrainer.setData(train_data, train_gt3D_embed, val_data, val_gt3D_embed)
poseNetTrainer.compileFunctions(compileDebugFcts=False)
###################################################################
#
# TRAIN
nEpochs = 100
train_res = poseNetTrainer.train(n_epochs=nEpochs, storeFilters=True)
train_costs = train_res[0]
wvals = train_res[1]
val_errs = train_res[2]
###################################################################
# TEST
# plot cost
fig = plt.figure()
plt.semilogy(train_costs)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_cost.png')
fig = plt.figure()
plt.semilogy(val_errs)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_errs.png')
# save results
poseNet.save("./eval/{}/net_{}.pkl".format(eval_prefix,eval_prefix))
# poseNet.load("./eval/{}/net_{}.pkl".format(eval_prefix,eval_prefix))
# add prior to network
cfg = HiddenLayerParams(inputDim=(batchSize, train_gt3D_embed.shape[1]), outputDim=(batchSize, numpy.prod(train_gt3D.shape[1:])), activation=None)
pcalayer = HiddenLayer(rng, poseNet.layers[-1].output, cfg, copyLayer=None, layerNum=len(poseNet.layers))
pcalayer.W.set_value(pca.components_)
pcalayer.b.set_value(pca.mean_)
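    # The appended layer acts as the PCA back-projection (output = embed.dot(W) + b
    # with W = components_, b = mean_), mapping the 30-dim embedding back to the
    # full 3*J joint coordinates.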
poseNet.layers.append(pcalayer)
poseNet.output = pcalayer.output
poseNet.cfgParams.numJoints = train_gt3D.shape[1]
poseNet.cfgParams.nDims = train_gt3D.shape[2]
poseNet.cfgParams.outputDim = pcalayer.cfgParams.outputDim
poseNet.save("./eval/{}/network_prior.pkl".format(eval_prefix))
###################################################################
# test
print("Testing ...")
gt3D = []
joints = []
for seq in testSeqs:
gt3D.extend([j.gt3Dorig for j in seq.data])
test_data, _ = testDataSet.imgStackDepthOnly(seq.name)
jts_embed = poseNet.computeOutput(test_data)
# Backtransform from embedding
# jts = pca.inverse_transform(jts_embed)
jts = jts_embed
for i in range(test_data.shape[0]):
joints.append(jts[i].reshape(gt3D[0].shape[0], 3)*(seq.config['cube'][2]/2.) + seq.data[i].com)
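            # The network output lives in a cube normalized to [-1, 1]; scaling by
            # half the cube size and adding the hand's center of mass (com) maps
            # the prediction back to metric coordinates.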
joints = numpy.array(joints)
hpe = NYUHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/'+eval_prefix+'/'
mean_error = hpe.getMeanError()
max_error = hpe.getMaxError()
print("Train samples: {}, test samples: {}".format(train_data.shape[0], len(gt3D)))
print("Mean error: {}mm, max error: {}mm".format(mean_error, max_error))
print("MD score: {}".format(hpe.getMDscore(80)))
print("{}".format([hpe.getJointMeanError(j) for j in range(joints[0].shape[0])]))
print("{}".format([hpe.getJointMaxError(j) for j in range(joints[0].shape[0])]))
# save results
cPickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix,os.path.split(__file__)[1],eval_prefix), "wb"), protocol=cPickle.HIGHEST_PROTOCOL)
print "Testing baseline"
#################################
# BASELINE
# Load the evaluation
data_baseline = di.loadBaseline('../data/NYU/test/test_predictions.mat', numpy.asarray(gt3D))
hpe_base = NYUHandposeEvaluation(gt3D, data_baseline)
hpe_base.subfolder += '/'+eval_prefix+'/'
print("Mean error: {}mm".format(hpe_base.getMeanError()))
hpe.plotEvaluation(eval_prefix, methodName='Our regr',baseline=[('Tompson et al.',hpe_base)])
ind = 0
for i in testSeqs[0].data:
if ind % 20 != 0:
ind += 1
continue
jt = joints[ind]
jtI = di.joints3DToImg(jt)
for joint in range(jt.shape[0]):
t=transformPoint2D(jtI[joint], i.T)
jtI[joint, 0] = t[0]
jtI[joint, 1] = t[1]
hpe.plotResult(i.dpt, i.gtcrop, jtI, "{}_{}".format(eval_prefix, ind))
        ind += 1
| gpl-3.0 |
q1ang/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
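# inlier_mask_ is a boolean array marking the samples that RANSAC considered
# consistent with its final consensus model; everything else is an outlier.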
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
alexeyum/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
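# Both coefficient arrays have shape (n_tasks, n_features). The independent
# Lasso fits may select different features for each task, while MultiTaskLasso's
# group penalty zeroes out entire feature columns jointly across all tasks.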
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
yanikou19/pymatgen | pymatgen/phasediagram/plotter.py | 1 | 30386 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module provides classes for plotting PhaseDiagram objects.
"""
from six.moves import zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Jun 15, 2012"
import math
import numpy as np
import itertools
from pymatgen.phasediagram.pdanalyzer import PDAnalyzer
from pymatgen.util.string_utils import latexify
from pymatgen.util.plotting_utils import get_publication_quality_plot
from pymatgen.util.coord_utils import in_coord_list
class PDPlotter(object):
"""
A plotter class for phase diagrams.
Args:
phasediagram: PhaseDiagram object.
        show_unstable: Whether unstable phases will also be plotted (as
            red crosses). Defaults to False.
"""
def __init__(self, phasediagram, show_unstable=False):
self._pd = phasediagram
self._dim = len(self._pd.elements)
if self._dim > 4:
raise ValueError("Only 1-4 components supported!")
self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
[[self._pd.facets[0][0], self._pd.facets[0][0]]]
self.show_unstable = show_unstable
@property
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False):
if self._dim < 4:
plt = self._get_2d_plot(label_stable, label_unstable, ordering,
energy_colormap,
process_attributes=process_attributes)
elif self._dim == 4:
plt = self._get_3d_plot(label_stable)
return plt
def show(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False):
"""
Draws the phase diagram using Matplotlib and show it.
"""
self.get_plot(label_stable=label_stable, label_unstable=label_unstable,
ordering=ordering, energy_colormap=energy_colormap,
process_attributes=process_attributes).show()
def _get_2d_plot(self, label_stable=True, label_unstable=True,
ordering=None, energy_colormap=None, vmin_mev=-60.0,
vmax_mev=60.0, show_colorbar=True,
process_attributes=False):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
        but since matplotlib is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way.
"""
plt = get_publication_quality_plot(8, 6)
from matplotlib.font_manager import FontProperties
if ordering is None:
(lines, labels, unstable) = self.pd_plot_data
else:
(_lines, _labels, _unstable) = self.pd_plot_data
(lines, labels, unstable) = order_phase_diagram(
_lines, _labels, _unstable, ordering)
if energy_colormap is None:
if process_attributes:
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
# One should think about a clever way to have "complex"
# attributes with complex processing options but with a clear
# logic. At this moment, I just use the attributes to know
# whether an entry is a new compound or an existing (from the
# ICSD or from the MP) one.
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "ko", linewidth=3, markeredgecolor="k",
markerfacecolor="b", markersize=12)
else:
plt.plot(x, y, "k*", linewidth=3, markeredgecolor="k",
markerfacecolor="g", markersize=18)
else:
for x, y in lines:
plt.plot(x, y, "ko-", linewidth=3, markeredgecolor="k",
markerfacecolor="b", markersize=15)
else:
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
pda = PDAnalyzer(self._pd)
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
vmin = vmin_mev / 1000.0
vmax = vmax_mev / 1000.0
if energy_colormap == 'default':
mid = - vmin / (vmax - vmin)
cmap = LinearSegmentedColormap.from_list(
'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
(mid, '#FFAAAA'), (1.0, '#FF0000')])
else:
cmap = energy_colormap
norm = Normalize(vmin=vmin, vmax=vmax)
_map = ScalarMappable(norm=norm, cmap=cmap)
_energies = [pda.get_equilibrium_reaction_energy(entry)
for coord, entry in labels.items()]
energies = [en if en < 0.0 else -0.00000001 for en in _energies]
vals_stable = _map.to_rgba(energies)
ii = 0
if process_attributes:
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=12)
else:
plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
markersize=18)
ii += 1
else:
for x, y in labels.keys():
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=15)
ii += 1
font = FontProperties()
font.set_weight("bold")
font.set_size(24)
# Sets a nice layout depending on the type of PD. Also defines a
# "center" for the PD, which then allows the annotations to be spread
# out in a nice manner.
if len(self._pd.elements) == 3:
plt.axis("equal")
plt.xlim((-0.1, 1.2))
plt.ylim((-0.1, 1.0))
plt.axis("off")
center = (0.5, math.sqrt(3) / 6)
else:
all_coords = labels.keys()
miny = min([c[1] for c in all_coords])
ybuffer = max(abs(miny) * 0.1, 0.1)
plt.xlim((-0.1, 1.1))
plt.ylim((miny - ybuffer, ybuffer))
center = (0.5, miny / 2)
plt.xlabel("Fraction", fontsize=28, fontweight='bold')
plt.ylabel("Formation energy (eV/fu)", fontsize=28,
fontweight='bold')
for coords in sorted(labels.keys(), key=lambda x: -x[1]):
entry = labels[coords]
label = entry.name
            # The following defines an offset for the annotation text emanating
# from the center of the PD. Results in fairly nice layouts for the
# most part.
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
else vec
valign = "bottom" if vec[1] > 0 else "top"
if vec[0] < -0.01:
halign = "right"
elif vec[0] > 0.01:
halign = "left"
else:
halign = "center"
if label_stable:
if process_attributes and entry.attribute == 'new':
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font,
color='g')
else:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font)
if self.show_unstable:
font = FontProperties()
font.set_size(16)
pda = PDAnalyzer(self._pd)
energies_unstable = [pda.get_e_above_hull(entry)
for entry, coord in unstable.items()]
if energy_colormap is not None:
energies.extend(energies_unstable)
vals_unstable = _map.to_rgba(energies_unstable)
ii = 0
for entry, coords in unstable.items():
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 \
if np.linalg.norm(vec) != 0 else vec
label = entry.name
if energy_colormap is None:
plt.plot(coords[0], coords[1], "ks", linewidth=3,
markeredgecolor="k", markerfacecolor="r",
markersize=8)
else:
plt.plot(coords[0], coords[1], "s", linewidth=3,
markeredgecolor="k",
markerfacecolor=vals_unstable[ii],
markersize=8)
if label_unstable:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign, color="b",
verticalalignment=valign,
fontproperties=font)
ii += 1
if energy_colormap is not None and show_colorbar:
_map.set_array(energies)
cbar = plt.colorbar(_map)
cbar.set_label(
'Energy [meV/at] above hull (in red)\nInverse energy ['
'meV/at] above hull (in green)',
rotation=-90, ha='left', va='center')
ticks = cbar.ax.get_yticklabels()
cbar.ax.set_yticklabels(['${v}$'.format(
v=float(t.get_text().strip('$'))*1000.0) for t in ticks])
f = plt.gcf()
f.set_size_inches((8, 6))
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
return plt
def _get_3d_plot(self, label_stable=True):
"""
        Shows the plot using pylab. Usually I won't do imports in methods,
        but since matplotlib is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way.
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(20)
(lines, labels, unstable) = self.pd_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = entry.name
if label_stable:
if len(entry.composition.elements) == 1:
ax.text(coords[0], coords[1], coords[2], label)
else:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(count, latexify(label)))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
ax.axis("off")
return plt
def write_image(self, stream, image_format="svg", label_stable=True,
label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False):
"""
Writes the phase diagram to an image in a stream.
Args:
stream:
stream to write to. Can be a file stream or a StringIO stream.
            image_format:
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
"""
plt = self.get_plot(
label_stable=label_stable, label_unstable=label_unstable,
ordering=ordering, energy_colormap=energy_colormap,
process_attributes=process_attributes)
f = plt.gcf()
f.set_size_inches((12, 10))
plt.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
"""
        Plot the chemical potential range map. Currently works only for
3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
"""
self.get_chempot_range_map_plot(elements, referenced=referenced).show()
    def get_chempot_range_map_plot(self, elements, referenced=True):
"""
        Returns a plot of the chemical potential range map. Currently works
only for 3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
Returns:
A matplotlib plot object.
"""
plt = get_publication_quality_plot(12, 8)
analyzer = PDAnalyzer(self._pd)
        chempot_ranges = analyzer.get_chempot_range_map(elements, referenced=referenced)
missing_lines = {}
excluded_region = []
for entry, lines in chempot_ranges.items():
comp = entry.composition
center_x = 0
center_y = 0
coords = []
contain_zero = any([comp.get_atomic_fraction(el) == 0
for el in elements])
is_boundary = (not contain_zero) and \
sum([comp.get_atomic_fraction(el) for el in elements]) == 1
for line in lines:
(x, y) = line.coords.transpose()
plt.plot(x, y, "k-")
for coord in line.coords:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
center_x += coord[0]
center_y += coord[1]
if is_boundary:
excluded_region.extend(line.coords)
if coords and contain_zero:
missing_lines[entry] = coords
else:
xy = (center_x / len(coords), center_y / len(coords))
plt.annotate(latexify(entry.name), xy, fontsize=22)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
#Shade the forbidden chemical potential regions.
excluded_region.append([xlim[1], ylim[1]])
excluded_region = sorted(excluded_region, key=lambda c: c[0])
(x, y) = np.transpose(excluded_region)
plt.fill(x, y, "0.80")
#The hull does not generate the missing horizontal and vertical lines.
#The following code fixes this.
el0 = elements[0]
el1 = elements[1]
for entry, coords in missing_lines.items():
center_x = sum([c[0] for c in coords])
center_y = sum([c[1] for c in coords])
comp = entry.composition
is_x = comp.get_atomic_fraction(el0) < 0.01
is_y = comp.get_atomic_fraction(el1) < 0.01
n = len(coords)
if not (is_x and is_y):
if is_x:
coords = sorted(coords, key=lambda c: c[1])
for i in [0, -1]:
x = [min(xlim), coords[i][0]]
y = [coords[i][1], coords[i][1]]
plt.plot(x, y, "k")
center_x += min(xlim)
center_y += coords[i][1]
elif is_y:
coords = sorted(coords, key=lambda c: c[0])
for i in [0, -1]:
x = [coords[i][0], coords[i][0]]
y = [coords[i][1], min(ylim)]
plt.plot(x, y, "k")
center_x += coords[i][0]
center_y += min(ylim)
xy = (center_x / (n + 2), center_y / (n + 2))
else:
center_x = sum(coord[0] for coord in coords) + xlim[0]
center_y = sum(coord[1] for coord in coords) + ylim[0]
xy = (center_x / (n + 1), center_y / (n + 1))
plt.annotate(latexify(entry.name), xy,
horizontalalignment="center",
verticalalignment="center", fontsize=22)
plt.xlabel("$\mu_{{{0}}} - \mu_{{{0}}}^0$ (eV)"
.format(el0.symbol))
plt.ylabel("$\mu_{{{0}}} - \mu_{{{0}}}^0$ (eV)"
.format(el1.symbol))
plt.tight_layout()
return plt
def get_contour_pd_plot(self):
"""
        Plot a contour phase diagram, where phase triangles are colored
according to degree of instability by interpolation. Currently only
works for 3-component phase diagrams.
Returns:
A matplotlib plot object.
"""
from scipy import interpolate
from matplotlib import cm
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
plt = self._get_2d_plot()
analyzer = PDAnalyzer(pd)
data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
for i, e in enumerate(entries):
data[i, 2] = analyzer.get_e_above_hull(e)
gridsize = 0.005
xnew = np.arange(0, 1., gridsize)
ynew = np.arange(0, 1, gridsize)
f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
znew = np.zeros((len(ynew), len(xnew)))
for (i, xval) in enumerate(xnew):
for (j, yval) in enumerate(ynew):
znew[j, i] = f(xval, yval)
plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
plt.colorbar()
return plt
def uniquelines(q):
"""
    Given all the facets, convert them into a set of unique lines. Specifically
used for converting convex hull facets into line pairs of coordinates.
Args:
q: A 2-dim sequence, where each row represents a facet. E.g.,
[[1,2,3],[3,6,7],...]
Returns:
setoflines:
A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
"""
setoflines = set()
for facets in q:
for line in itertools.combinations(facets, 2):
setoflines.add(tuple(sorted(line)))
return setoflines
def triangular_coord(coord):
"""
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
        coord: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
"""
unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
def tet_coord(coord):
"""
Convert a 3D coordinate into a tetrahedron based coordinate system for a
prettier phase diagram.
Args:
        coord: coordinate used in the convex hull computation.
Returns:
coordinates in a tetrahedron-based coordinate system.
"""
unitvec = np.array([[1, 0, 0], [0.5, math.sqrt(3) / 2, 0],
[0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
"""
Orders the entries (their coordinates) in a phase diagram plot according
to the user specified ordering.
Ordering should be given as ['Up', 'Left', 'Right'], where Up,
Left and Right are the names of the entries in the upper, left and right
corners of the triangle respectively.
Args:
lines: list of list of coordinates for lines in the PD.
stable_entries: {coordinate : entry} for each stable node in the
phase diagram. (Each coordinate can only have one stable phase)
unstable_entries: {entry: coordinates} for all unstable nodes in the
phase diagram.
ordering: Ordering of the phase diagram, given as a list ['Up',
'Left','Right']
Returns:
(newlines, newstable_entries, newunstable_entries):
- newlines is a list of list of coordinates for lines in the PD.
- newstable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- newunstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
yup = -1000.0
xleft = 1000.0
xright = -1000.0
for coord in stable_entries:
if coord[0] > xright:
xright = coord[0]
nameright = stable_entries[coord].name
if coord[0] < xleft:
xleft = coord[0]
nameleft = stable_entries[coord].name
if coord[1] > yup:
yup = coord[1]
nameup = stable_entries[coord].name
if (not nameup in ordering) or (not nameright in ordering) or \
(not nameleft in ordering):
raise ValueError(
'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
'right}"'
' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
right=nameright,
ord=ordering))
cc = np.array([0.5, np.sqrt(3.0) / 6.0], np.float)
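    # cc is the centroid of the equilateral composition triangle with vertices
    # (0, 0), (1, 0) and (0.5, sqrt(3)/2); the rotations below are taken about
    # this point.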
if nameup == ordering[0]:
if nameleft == ordering[1]:
# The coordinates were already in the user ordering
return lines, stable_entries, unstable_entries
else:
newlines = [[np.array(1.0 - x), y] for x, y in lines]
newstable_entries = {(1.0 - c[0], c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (1.0 - c[0], c[1])
for entry, c in
unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[1]:
if nameleft == ordering[2]:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0
newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[2]:
if nameleft == ordering[0]:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c240 * xx - s240 * y[ii]
newy[ii] = -s240 * xx + c240 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
| mit |
andaag/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In this case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
Here, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
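        # BIC ~ -2 * log-likelihood + n_parameters * log(n_samples); lower is
        # better, so the model minimizing it is kept below.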
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/core/base.py | 2 | 40524 | """
Base and utility classes for pandas objects.
"""
import warnings
import textwrap
from pandas import compat
from pandas.compat import builtins
import numpy as np
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.core.dtypes.common import (
is_datetimelike,
is_object_dtype,
is_list_like,
is_scalar,
is_extension_type,
is_extension_array_dtype)
from pandas.util._validators import validate_bool_kwarg
from pandas.errors import AbstractMethodError
from pandas.core import common as com, algorithms
import pandas.core.nanops as nanops
import pandas._libs.lib as lib
from pandas.compat.numpy import function as nv
from pandas.compat import PYPY, OrderedDict
from pandas.util._decorators import Appender, cache_readonly, Substitution
from pandas.core.accessor import DirNamesMixin
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin, DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (getattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
getattr(self, key, None) is not None)):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict((
(builtins.sum, np.sum),
(builtins.max, np.max),
(builtins.min, np.min),
))
_cython_table = OrderedDict((
(builtins.sum, 'sum'),
(builtins.max, 'max'),
(builtins.min, 'min'),
(np.all, 'all'),
(np.any, 'any'),
(np.sum, 'sum'),
(np.mean, 'mean'),
(np.prod, 'prod'),
(np.std, 'std'),
(np.var, 'var'),
(np.median, 'median'),
(np.max, 'max'),
(np.min, 'min'),
(np.cumprod, 'cumprod'),
(np.cumsum, 'cumsum'),
))
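    # These tables let agg()/aggregate() recognize builtins and numpy functions:
    # e.g. builtins.sum is treated as np.sum, and np.mean is mapped to the name
    # 'mean' so the object's own method is called via _is_cython_func.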
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError('Column(s) {selection} already selected'
.format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
how can be a string describe the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif (isinstance(obj, ABCDataFrame) and
k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries)
for r in compat.itervalues(result))
def is_any_frame():
            # return a boolean if we have *any* nested frames
return any(isinstance(r, ABCDataFrame)
for r in compat.itervalues(result))
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="return the transpose, which is by "
"definition self")
@property
def _is_homogeneous_type(self):
"""Whether the object has a single dtype.
By definition, Series and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
DataFrame._is_homogeneous_type
MultiIndex._is_homogeneous_type
"""
return True
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
""" return the first element of the underlying data as a python
scalar
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.values._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def max(self):
"""
Return the maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
return a ndarray of the maximum argument indexer
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
"""
Return the minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
return a ndarray of the minimum argument indexer
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return iter(self.tolist())
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return isna(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def _map_values(self, mapper, na_action=None):
"""An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if isinstance(mapper, dict):
if hasattr(mapper, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
                # convert to a Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from pandas import Series
mapper = Series(mapper)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_1d(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.astype(object)
values = getattr(values, 'values', values)
if na_action == 'ignore':
def map_f(values, f):
return lib.map_infer_mask(values, f,
isna(values).view(np.uint8))
else:
map_f = lib.map_infer
# mapper is a function
new_values = map_f(values, mapper)
return new_values
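# A minimal doctest-style sketch (not part of pandas itself) of the
# dict-with-__missing__ branch above: a collections.defaultdict defines
# __missing__, so the mapper is wrapped in a lambda rather than converted to a
# Series, and unmapped keys fall back to the default value (exercised here
# through Series.map, which delegates to _map_values):
#
# >>> from collections import defaultdict
# >>> mapper = defaultdict(lambda: 'other', {'cat': 'kitten'})
# >>> pd.Series(['cat', 'dog']).map(mapper)
# 0    kitten
# 1     other
# dtype: object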
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : integer, optional
Rather than count individual values, group them into half-open bins,
a convenience for ``pd.cut``; only works with numeric data.
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: number of non-NA elements in a Series
DataFrame.count: number of non-NA elements in a DataFrame
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
apparitions of values, divide the index in the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
from pandas.core.algorithms import value_counts
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
return result
def unique(self):
values = self._values
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.algorithms import unique1d
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
"""
return self.nunique() == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.values)
return v
@Substitution(
values='', order='', size_hint='',
sort=textwrap.dedent("""\
sort : boolean, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""))
@Appender(algorithms._shared_docs['factorize'])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs['searchsorted'] = (
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x = pd.Categorical(['apple', 'bread', 'bread',
...                     'cheese', 'milk'], ordered=True)
>>> x
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'], side='right')
array([3])
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
def drop_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(duplicated(self, keep=keep),
index=self.index).__finalize__(self)
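# A minimal doctest-style usage sketch (not part of pandas itself) of the two
# methods above on an Index:
#
# >>> idx = pd.Index(['a', 'b', 'a'])
# >>> idx.duplicated(keep='first')
# array([False, False,  True])
# >>> idx.drop_duplicates(keep='first')
# Index(['a', 'b'], dtype='object')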
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
| bsd-3-clause |
schreiberx/sweet | doc/rexi/rexi_with_cauchy_integral/test_cauchy_chebyshev/CauchyPhiQuadrature.py | 1 | 3633 | #! /usr/bin/env python3
#
# Author: Martin Schreiber <M.Schreiber@exeter.ac.uk>
# Date: 2017-08-16
#
import math
import cmath
import numpy as np
import sys
import matplotlib.pyplot as plt
class CauchyPhiQuadrature:
alpha = []
beta = []
#
# Phi 0-N functions
#
def phi(self, n, z):
if n == 0:
return cmath.exp(z)
if n != 0:
if abs(z) < 1e-8:
return 1.0/math.factorial(n)
raise Exception("Z close to zero, not yet supported for phi "+str(n)+" !!!")
return (self.phi(n-1, z) - 1.0/math.factorial(n-1))/z
raise Exception("Phi function not supported yet")
#
# Constructor
# See setup(...) for documentation on parameters
#
def __init__(self, phiN, contour_boundary, contour_int_method):
if phiN == -1:
return
self.setup(phiN, contour_boundary, contour_int_method)
def setup(
self,
phiN, # phi function id, use 0 for exp(lambda) standard ODE integration
contour_boundary, # boundary description
contour_int_method # integration method
):
self.contour_boundary = contour_boundary
self.contour_int_method = contour_int_method
if self.contour_boundary['shape'] == 'circle':
self.R = contour_boundary['R']
self.mu = contour_boundary['mu']
if self.contour_int_method['method'] == 'trapezoidal':
self.phiN = phiN
self.N = contour_int_method['N']
#
# Compute support points of quadrature
#
self.coords = []
for j in range(self.N):
theta_j = 2.0*math.pi*(j+0.5)/self.N
gamma_j = self.R*cmath.exp(1j*theta_j) + self.mu
self.coords.append(gamma_j)
self.alpha = []
self.beta = []
for j in range(self.N):
theta_j = 2.0*math.pi*(j+0.5)/self.N
gamma_j = self.R*cmath.exp(1j*theta_j) + self.mu
k = self.R*cmath.exp(1j*theta_j)
beta = -self.phi(phiN, gamma_j)*k
beta /= self.N
alpha = -(k + self.mu)
self.alpha.append(alpha)
self.beta.append(beta)
return
elif self.contour_int_method['method'] == 'chebyshev':
self.phiN = phiN
self.N = contour_int_method['N']
raise Exception("Unsupported combination of contour_boundary "+contour_boundary['shape']+" and integration method "+contour_int_method['method'])
def plot(self, filename = None):
points_re = []
points_im = []
for j in range(self.N):
points_re.append(self.coords[j].real)
points_im.append(self.coords[j].imag)
points_re.append(points_re[0])
points_im.append(points_im[0])
plt.plot(points_re, points_im, '-bo')
if filename != None:
plt.savefig(filename)
else:
plt.show()
def approx_phi_pde(self, dt_L, U):
S = len(dt_L)
accum = np.array([0.j, 0.j])
for j in range(len(self.alpha)):
M_inv = np.linalg.inv(dt_L + np.identity(S)*self.alpha[j])
accum += self.beta[j] * np.dot(M_inv, U)
return accum
def approx_phi_ode(self, dt_L, U):
accum = 0.0
for j in range(len(self.alpha)):
M_inv = 1.0/(dt_L + self.alpha[j])
accum += self.beta[j] * M_inv * U
return accum
def analytical_phi_pde(self, dt_L, U):
S = len(dt_L)
# Setup eigenvalues and Eigenvectors for analytical solution
LEvals, LEvecs = np.linalg.eig(dt_L)
LEvecs_inv = np.linalg.inv(LEvecs)
if True:
error = np.sum(np.absolute(dt_L - np.dot(np.dot(LEvecs, np.diag(LEvals)), LEvecs_inv)))
if error > 1e-10:
raise Exception("Error "+str(error)+" too large")
Uwave = np.dot(LEvecs_inv, U)
tmp = np.array([self.phi(self.phiN, LEvals[i])*Uwave[i] for i in range(S)])
return np.dot(LEvecs, tmp)
def analytical_phi_ode(self, dt_L, U):
# Setup eigenvalues and Eigenvectors for analytical solution
return self.phi(self.phiN, dt_L)*U
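# A minimal usage sketch (added for illustration, not part of the original
# file): approximate phi_0(dt*lambda) = exp(dt*lambda) for a scalar, purely
# oscillatory test value dt*lambda = 1j lying inside a circular contour
# (mu = 0, R = 2), and compare against the analytical value. For an analytic,
# periodic integrand the trapezoidal rule converges spectrally, so the
# difference is expected to approach machine precision for moderate N.
def _demo_cauchy_phi0(N=64):
    quad = CauchyPhiQuadrature(
        0,                                          # phi_0, i.e. exp(lambda)
        {'shape': 'circle', 'R': 2.0, 'mu': 0.0},   # contour boundary
        {'method': 'trapezoidal', 'N': N}           # integration method
    )
    dt_L = 1j       # scalar eigenvalue inside the contour
    U = 1.0
    approx = quad.approx_phi_ode(dt_L, U)
    exact = quad.analytical_phi_ode(dt_L, U)
    return abs(approx - exact)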
| mit |
akionakamura/scikit-learn | benchmarks/bench_multilabel_metrics.py | 86 | 7286 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
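# A minimal programmatic usage sketch (an illustration, not part of the
# benchmark's CLI): time only the micro-averaged F1 score on dense and CSR
# targets with a small problem size, then print the table. This mirrors what
# the __main__ block below assembles from argparse arguments.
def _demo_benchmark():
    metric_names = ['f1']
    format_names = ['dense', 'csr']
    results = benchmark([METRICS[k] for k in metric_names],
                        [FORMATS[k] for k in format_names],
                        samples=200, classes=4, density=.2, n_times=2)
    _tabulate(results, metric_names, format_names)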
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
alexandrebarachant/mne-python | doc/conf.py | 5 | 9979 | # -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import date
import sphinx_gallery
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
import mne
if not os.path.isdir('_images'):
os.mkdir('_images')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
from numpydoc import numpydoc, docscrape
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx_gallery.gen_gallery',
]
extensions += ['numpydoc']
extensions += ['gen_commands'] # auto generate the doc for the python commands
# extensions += ['flow_diagram'] # generate flow chart in cookbook
autosummary_generate = True
autodoc_default_flags = ['inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNE'
td = date.today()
copyright = u'2012-%s, MNE Developers. Last updated on %s' % (td.year,
td.isoformat())
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_patterns = ['source/generated']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': ' ',
'source_link_position': "footer",
'bootswatch_theme': "flatly",
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [
("Get started", "getting_started"),
("Tutorials", "tutorials"),
("Gallery", "auto_examples/index"),
("API", "python_reference"),
("Manual", "manual/index"),
("FAQ", "faq"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ---------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
# ('index', 'MNE.tex', u'MNE Manual',
# u'MNE Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy-dev/', None),
'scipy': ('http://scipy.github.io/devdocs/', None),
}
examples_dirs = ['../examples', '../tutorials']
gallery_dirs = ['auto_examples', 'auto_tutorials']
try:
from mayavi import mlab
find_mayavi_figures = True
# Do not pop up any mayavi windows while running the
# examples. These are very annoying since they steal the focus.
mlab.options.offscreen = True
except Exception:
find_mayavi_figures = False
sphinx_gallery_conf = {
'doc_module': ('mne',),
'reference_url': {
'mne': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.10.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference',
'mayavi': 'http://docs.enthought.com/mayavi/mayavi'},
'examples_dirs': examples_dirs,
'gallery_dirs': gallery_dirs,
'find_mayavi_figures': find_mayavi_figures,
'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
'mod_example_dir': 'generated',
}
numpydoc_class_members_toctree = False
| bsd-3-clause |
JasonKessler/scattertext | demo_emoji.py | 1 | 1739 | import io
from zipfile import ZipFile
import agefromname
import nltk
import pandas as pd
import urllib.request
import scattertext as st
from scattertext.termranking import OncePerDocFrequencyRanker
try:
print("Downloading tweet dataset")
df_mf = pd.read_csv('emoji_data.csv')
except:
print("Downloading tweet dataset")
with ZipFile(io.BytesIO(urllib.request.urlopen(
'http://followthehashtag.com/content/uploads/USA-Geolocated-tweets-free-dataset-Followthehashtag.zip'
).read())) as zf:
df = pd.read_excel(zf.open('dashboard_x_usa_x_filter_nativeretweets.xlsx'))
df['first_name'] = df['User Name'].apply(
lambda x: x.split()[0].lower() if type(x) == str and len(x.split()) > 0 else x)
male_prob = agefromname.AgeFromName().get_all_name_male_prob()
df_aug = pd.merge(df, male_prob, left_on='first_name', right_index=True)
df_aug['gender'] = df_aug['prob'].apply(lambda x: 'm' if x > 0.9 else 'f' if x < 0.1 else '?')
df_mf = df_aug[df_aug['gender'].isin(['m', 'f'])]
df_mf.to_csv('emoji_data.csv', index=False)
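# A small helper (an illustration only, not used above) spelling out the
# thresholding rule applied to the male-name probability in the except branch:
# first names more than 90% likely to be male are tagged 'm', less than 10%
# are tagged 'f', and everything in between is marked '?' and filtered out.
def _gender_from_male_prob(prob, hi=0.9, lo=0.1):
    if prob > hi:
        return 'm'
    if prob < lo:
        return 'f'
    return '?'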
nlp = st.tweet_tokenizier_factory(nltk.tokenize.TweetTokenizer())
df_mf['parse'] = df_mf['Tweet content'].apply(nlp)
corpus = st.CorpusFromParsedDocuments(
df_mf,
parsed_col='parse',
category_col='gender',
feats_from_spacy_doc=st.FeatsFromSpacyDocOnlyEmoji()
).build()
html = st.produce_scattertext_explorer(
corpus,
category='f',
category_name='Female',
not_category_name='Male',
use_full_doc=True,
term_ranker=OncePerDocFrequencyRanker,
sort_by_dist=False,
metadata=(df_mf['User Name']
+ ' (@' + df_mf['Nickname'] + ') '
+ df_mf['Date'].astype(str)),
width_in_pixels=1000
)
print('writing EmojiGender.html')
open("EmojiGender.html", 'wb').write(html.encode('utf-8'))
| apache-2.0 |
dkillick/iris | lib/iris/tests/unit/quickplot/test_pcolormesh.py | 11 | 2344 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.quickplot.pcolormesh` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
qplt.pcolormesh(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
qplt.pcolormesh(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=True)
coord = self.cube.coord('foo')
self.foo = coord.contiguous_bounds()
self.foo_index = np.arange(coord.points.size + 1)
coord = self.cube.coord('bar')
self.bar = coord.contiguous_bounds()
self.bar_index = np.arange(coord.points.size + 1)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch('matplotlib.pyplot.pcolormesh',
return_value=None)
self.draw_func = qplt.pcolormesh
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
andersy005/spark-xarray | setup.py | 1 | 2574 | #!/usr/bin/env python
from setuptools import setup
from setuptools import find_packages
import os
from ast import parse
LONG_DESCRIPTION = """
**spark-xarray**:
Spark-xarray is a high level, Apache Spark and xarray-based Python library for working
with netCDF climate model data with Apache Spark.
Important links
------------------
- Official source code repo: https://github.com/andersy005/spark-xarray
- Issue tracker: https://github.com/andersy005/spark-xarray/issues
"""
NAME = 'spark-xarray'
def version():
"""Return version string."""
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),'sparkxarray', '__init__.py')) as input_file:
for line in input_file:
if line.startswith('__version__'):
return parse(line).body[0].value.s
#for line in open('sparkxarray/__init__.py').readlines():
# if line.startswith('__version__'):
# exec(line)
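# A quick illustration (not used by setup itself) of what the ast-based
# extraction above does to a single matching line: parse() yields a Module
# whose first statement is an Assign node, and .value.s is the assigned
# string literal.
def _demo_version_parse(line="__version__ = '0.1.0'"):
    return parse(line).body[0].value.s    # -> '0.1.0'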
INSTALL_REQUIRES = (['numpy >= 1.7',
'scipy >= 0.16',
'pandas >= 0.15.0',
'netCDF4 >= 1.2',
'xarray>=0.9.5',
'dask >= 0.14',
'distributed >= 1.16.1',
'geopandas >= 0.3.0',
'toolz>=0.8.2',
'cloudpickle >= 0.2.1'])
packages = ['sparkxarray', 'sparkxarray.tests']
package_data = {'sparkxarray': ['data/*.nc']}
setup(name=NAME,
version=version(),
author='Anderson Banihirwe, Kevin Paul',
author_email='axbanihirwe@gmail.com',
description='Big Atmospheric & Oceanic Data Analysis with Apache Spark + xarray',
url='https://github.com/andersy005/spark-xarray',
long_description=LONG_DESCRIPTION,
install_requires=INSTALL_REQUIRES,
packages=packages,
package_data=package_data,
keywords=[' Climate Science', 'xarray', 'Apache Spark', 'Distributed', 'netCDF', 'Parallel'],
classifiers=[
'Development Status :: 1 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Atmospheric Science'
],
zip_safe=False,
)
| apache-2.0 |
BiaDarkia/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 49 | 1699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.nipy_spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
boland1992/seissuite_iran | seissuite/sort_later/more_jiggle.py | 2 | 21581 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 12:28:32 2015
@author: boland
"""
#-----------------------------------------------------------------------------
# IMPORT MODULES
#-----------------------------------------------------------------------------
import os
import fiona
import pickle
import pyproj
import random
import datetime
import itertools
import numpy as np
import pointshape as ps
import multiprocessing as mp
import matplotlib.pyplot as plt
from math import sqrt, radians, cos, sin, asin
from scipy.cluster.vq import kmeans
from shapely import geometry
#-----------------------------------------------------------------------------
# GENERATE SECOND SET OF VARIABLES AND STATES
#-----------------------------------------------------------------------------
verbose = False
#Enter path to boundary shape file.
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
# Enter number of stations required.
N = 130
# Enter km spacing between path density points.
km_points = 100.0
# Reference elipsoid to calculate distance.
wgs84 = pyproj.Geod(ellps='WGS84')
# Enter number of bins for 2D Histogram density calculation.
nbins = 200
# Enter estimated average shear wave velocity. 3kms-1 is the default!
velocity = 3.0
# Define your ambient noise period range OR individual period in seconds.
global period_range
period_range = [1,40]
#-----------------------------------------------------------------------------
#SHAPEFILE FUNCTIONS
#-----------------------------------------------------------------------------
def shape_(input_shape):
with fiona.open(input_shape) as fiona_collection:
# In this case, we'll assume the shapefile only has one record/layer
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
return geometry.asShape( shapefile_record['geometry'] )
shape = shape_(shape_path)
def point_check(coord, shape=shape):
point = geometry.Point(coord[0], coord[1])
if shape.contains(point):
return coord
def many_points(shape, coords):
"""
Function that returns lat-lon coordinates of many random points within
a shapefile shape e.g. boundaries of a state or country.
"""
minx, miny, maxx, maxy = shape.bounds
X, Y = coords[:,0], coords[:,1]
coords = np.column_stack((X,Y))
coord_points = []
#generate points in parallel for speed, order is not preserved.
pool = mp.Pool()
coord_points = pool.map(point_check, coords)
pool.close()
pool.join()
#remove None values from above numpy array!
X = [i[0] for i in coord_points if i != None]
Y = [i[1] for i in coord_points if i != None]
#convert np array to kmeans function friendly nx2 matrix!
#output is a nx2 vector stacked matrix called coord_points,
#coord_points[:,0] are lons and coord_points[:,1] are lats
return np.column_stack((X,Y))
#-----------------------------------------------------------------------------
#PATHS AND DISTANCES ON GREAT-CIRCLE FUNCTIONS
#-----------------------------------------------------------------------------
def remove_distance(period_range, max_dist = 2000):
"""
Function that returns the resolvable ambient noise tomography
distance range, given the period range (or single period in seconds)
available to the study. The distances returned are in km.
The default maximum distance can be reassigned based on the cut-off found
in the time-lag plots for your study.
"""
if type(period_range) == list:
min_dist = min(period_range) * 9
return [min_dist, max_dist]
elif isinstance(period_range, (int, float)):
return [period_range*9, max_dist]
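# Worked example (sketch only): with the default 2000 km cut-off,
#
# >>> remove_distance([1, 40], max_dist=2000)
# [9, 2000]
#
# i.e. the shortest resolvable inter-station distance is min(period_range)*9 km.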
global dist_range
dist_range = remove_distance(period_range, max_dist = 2000)
def haversine2(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees).
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
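# Worked example (sketch only): one degree of longitude along the equator,
#
# >>> round(haversine2(0.0, 0.0, 1.0, 0.0), 1)
# 111.1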
def fast_geodesic(lon1,lat1,lon2,lat2,npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def cluster_points(coord_points, N):
"""
Function that returns an Nx2 matrix of lon-lat cluster centroids, i.e. the
k-means centroids of a large set of random points such as those produced
by the many_points() function above.
"""
return kmeans(coord_points, N)[0]
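# A minimal self-contained sketch (an illustration, not called in this script)
# of the k-means call wrapped above: reduce 1000 random 2-D points (stand-ins
# for lon-lat pairs) to 5 cluster centroids.
def _demo_cluster_points(n_random=1000, n_centroids=5):
    rand_coords = np.random.rand(n_random, 2)
    centroids = cluster_points(rand_coords, n_centroids)
    return centroids    # array of centroid coordinates, typically n_centroids rows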
def paths_func(path_info, km=km_points):
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
# interpoint distance <= 1 km, and nb of points >= 100
dist = haversine2(lon1, lat1, lon2, lat2)
npts = max(int((np.ceil(dist) + 1)/km), 100)
path = fast_geodesic(lon1,lat1,lon2,lat2, npts)
#lons, lats = [lon1 for i in range(0,len(path))], \
#[lat1 for i in range(0,len(path))]
#path = np.column_stack((path,lons,lats))
if min(dist_range) < dist < max(dist_range):
#remove the closest points along this line that fall below the distance
#find the index of the first point that is above this distance away!
pts_km = npts / float((np.ceil(dist) + 1)) #this gives pts/km
#remove all points below this index in the paths list
dist_index = pts_km * min(dist_range)
path = path[dist_index:]
return path
else:
return np.zeros_like(path)
def points_and_distances(path_info):
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
dist = haversine2(lon1, lat1, lon2, lat2)
return [dist, lon2, lat2]
def fast_dists(nets):
dist_points = map(points_and_distances, nets)
return np.vstack(dist_points)
def spread_paths(nets):
return map(paths_func, nets)
#-----------------------------------------------------------------------------
# GENERATE SECOND SET OF VARIABLES AND STATES
#-----------------------------------------------------------------------------
ideal_path = 'ideal_coordinates.pickle'
#if no paths have been done before, start afresh!
if not os.path.exists(ideal_path):
#generate N kmeans cluster points from massive M number of randomly
#distributed points inside the shape file.
M = 1e5
lot_points = ps.points_in_shape(shape_path, M)
coords = cluster_points(lot_points, N)
#else import already processed coordinates if the program has already done so.
else:
f = open(name=ideal_path, mode='rb')
coords = pickle.load(f)
f.close()
lonmin, lonmax = np.floor(min(coords[:,0])), np.ceil(max(coords[:,0]))
latmin, latmax = np.floor(min(coords[:,1])), np.ceil(max(coords[:,1]))
print lonmin,lonmax,latmin,latmax
kappa = [np.vstack([[coord1[0],coord1[1],coord2[0],coord2[1]]\
for coord2 in coords]) for coord1 in coords]
t0 = datetime.datetime.now()
pool = mp.Pool()
paths = pool.map(spread_paths, kappa)
pool.close()
pool.join()
t1 = datetime.datetime.now()
counter, counter2 = 0, 0
#cd Desktop/Link\ to\ SIMULATIONS/Network_Tracks/smarter_model/
grad_ideal, grad_check1, grad_check2, H_avg1, H_avg2 = 0, 0, 0, 0, 0
SHAPE = (1,1)
perc_high = 0.01
low_counter = 0
random_counter = 0
new_coord = 0
infinite_counter = 0
find_it = []
check_coord = None
use_old_path = False
searches_per_point = 3
while infinite_counter <= 1:
rand_indicator = random.randint(1,10)
if verbose:
print "check coord", check_coord
t0 = datetime.datetime.now()
print use_old_path
#the following while loop is a work around.
#new paths shape: (130, 100, 4) rather than being (130,)
while SHAPE != (130,):
if check_coord is None or rand_indicator < 4:
#------------------------------------------------------------------
# Option one: randomly select old coordinate to move.
#------------------------------------------------------------------
while len(find_it) == 0:
# Remove random set of paths associated with the N coordinates.
rand_int = random.randint(0,len(paths)-1)
old_path = paths[rand_int]
# Determine which old coordinate to remove.
old_coord = coords[rand_int]#[old_path[0][0][0],old_path[0][0][1]]
# Find index of array in nested array to remove!
find_it = np.where(coords==old_coord)[0]
coord_index = find_it[0]
counter2 = 0
print "a1"
elif counter2 == searches_per_point-1 or not use_old_path:
#------------------------------------------------------------------
# Option two: select new high density point if too many searches per
# point OR it is stated that use_old_path = False
#------------------------------------------------------------------
print counter2 % searches_per_point
while len(find_it) == 0:
old_coord = check_coord
find_it = np.where(coords==old_coord)[0]
for paths_index, path in enumerate(paths):
for dots in path:
find_paths = np.where(dots==old_coord[0])[0]
if len(find_paths) != 0:
rand_int = False
break
coord_index = find_it[0]
counter2 = 0
print "a2"
elif use_old_path:
#------------------------------------------------------------------
# Option three: if conditions not met, move same old point.
#------------------------------------------------------------------
coord_index = -1
old_path = paths[coord_index]
old_coord = coords[coord_index]
counter2 += 1
print "a3"
if verbose:
print "coord index to remove is:", coord_index
coords = list(coords)
del coords[coord_index]
coords = np.asarray(coords)
new_coord_first = new_coord
#----------------------------------------------------------------------
# Generate new point coordinate.
#----------------------------------------------------------------------
if counter <= 1:
#------------------------------------------------------------------
# Option one: generate random new coordinate within boundary shape.
#------------------------------------------------------------------
new_coord = ps.points_in_shape(shape_path, 1)[0]
else:
#------------------------------------------------------------------
# Option two: go with whatever previous calculation had been made.
#------------------------------------------------------------------
new_coord = new_coord
# Place new coordinate in old set of coordinates
coords = np.append(coords, [new_coord], axis=0)
# Generate new array of coordinate combinations for new paths.
new_coord_set = np.vstack([[new_coord[0],new_coord[1],coord1[0],\
coord1[1]] for coord1 in coords])
# Generate new path points.
new_paths = map(paths_func, new_coord_set)
SHAPE = np.asarray(new_paths).shape
if not SHAPE == (130,):
#remove substitute back the old coordinate for the new coordinate!
coords = list(coords)
#find index of array in nested array to remove!
del coords[-1]
coords = np.asarray(coords)
#place the old coordinate back into the set of coordinates
coords = np.append(coords, [old_coord], axis=0)
#----------------------------------------------------------------------
# Delete old paths points.
#----------------------------------------------------------------------
if not rand_int:
if verbose:
print "path index to remove is:", coord_index
del paths[paths_index]
elif use_old_path:
del paths[-1]
else:
if verbose:
print "paths index to remove is:", coord_index
del paths[rand_int]
# Reset shape to work around error from above.
SHAPE = (1,1)
# Append new set of paths now that old set has been deleted.
paths = np.append(paths, [new_paths], axis=0)
#create a flattened numpy array of size 2xN from the paths created!
paths_density_check = list(itertools.chain(*paths))
paths_density_check = np.asarray(list(itertools.chain\
(*paths_density_check)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(paths_density_check).view(np.dtype\
((np.void, paths_density_check.dtype.itemsize * \
paths_density_check.shape[1])))
_, idx = np.unique(b, return_index=True)
paths_density_check = np.unique(b).view(paths_density_check.dtype)\
.reshape(-1, paths_density_check.shape[1])
#remove all path points that lay outside the shape file polygon
#paths_density_check = ps.paths_in_shape(paths_density_check)
# Reset paths as a list to be able to delete indices on next loop.
paths = list(paths)
#-------------------------------------------------------------------------
# Remove zeroes from path_density_check to ensure all paths that
# were NOT in the distance threshold are removed from the path density
# calculation!
#-------------------------------------------------------------------------
path_density_lons, path_density_lats = paths_density_check[:,0], \
paths_density_check[:,1]
FIND_ZERO1 = np.where(paths_density_check[:,0]==0)[0]
FIND_ZERO2 = np.where(paths_density_check[:,1]==0)[0]
if len(FIND_ZERO1) != 0 and len(FIND_ZERO2) != 0:
path_density_lons = np.delete(path_density_lons, FIND_ZERO1)
path_density_lats = np.delete(path_density_lats, FIND_ZERO2)
#-------------------------------------------------------------------------
# Set up path density calculations using numpy's histogram2d function.
#-------------------------------------------------------------------------
H, xedges, yedges = np.histogram2d(path_density_lons,
path_density_lats,
bins=nbins)
# Calculate the gradient field of the path density field.
GRAD = np.abs(np.asarray(np.gradient(H)[0]))
# H needs to be rotated and flipped.
H = np.rot90(H)
#GRAD = np.rot90(GRAD)
H = np.flipud(H)
#GRAD = np.flipud(GRAD)
# Mask zeros
H = np.ma.masked_where(H==0,H)
#GRAD = np.ma.masked_where(GRAD==0,GRAD)
H_avg1 = np.average(H)
grad_check1 = np.std(GRAD)
#-------------------------------------------------------------------------
# Find coordinates of high density within path density field.
#-------------------------------------------------------------------------
search = np.where(H > 0.50 * np.max(H))
# Scale these points with respect to the lat-lon limits!
Hmaxx, Hmaxy = search[1], search[0]
Hmaxx = (lonmax-lonmin)/(nbins) * Hmaxx + lonmin
Hmaxy = (latmax-latmin)/(nbins) * Hmaxy + latmin
# Make sure all low density coordinates ARE within shapefile!
highest_density_coords = ps.paths_in_shape(np.column_stack((Hmaxx, Hmaxy)))
WHERE = np.where(H < perc_high*H_avg1)
if 0 < rand_indicator <= 3:
#half the time move the coordinates to low density locations.
#scale these points with respect to the lat-lon limits!
Hminx, Hminy = WHERE[1], WHERE[0]
Hminx = (lonmax-lonmin)/(nbins) * Hminx + lonmin
Hminy = (latmax-latmin)/(nbins) * Hminy + latmin
# Make sure all low density coordinates ARE within shapefile!
low_density_coords = ps.paths_in_shape(np.column_stack((Hminx, Hminy)))
# Low_density_coords = many_points(shape, np.column_stack((Hminx, Hminy)))
if len(low_density_coords) == 0:
new_coord = ps.points_in_shape(shape_path, 1)[0]
#increase percentage of search if no new low density points are created!
perc_high +=0.05
elif len(low_density_coords) == 1:
new_coord = low_density_coords[0]
perc_high +=0.05
else:
new_coord = low_density_coords[random.randint(0,len(low_density_coords)-1)]
elif 3 < rand_indicator <= 10:
# Half the time move coordinates to random locations.
new_coord = ps.points_in_shape(shape_path, 1)[0]
if counter == 0:
grad_ideal = 1e6
avg_ideal = 0
#fig5 = plt.figure()
#plt.scatter(coords[:,0], coords[:,1],c='b', s=10)
#plt.scatter(new_coord[0], new_coord[1],c='r', s=30)
#plt.scatter(old_coord[0], old_coord[1],c='g', s=30)
#fig5.savefig("coords{}.png".format(counter))
#fig5.clf()
if grad_check1 < grad_ideal and avg_ideal < H_avg1:
with open(u'ideal_coordinates.pickle', 'wb') as f:
print "\nExporting new ideal coordinates."
pickle.dump(coords, f, protocol=2)
fig2 = plt.figure()
plt.pcolormesh(xedges,yedges,H)
plt.xlabel('longitude (degrees)')
plt.ylabel('latitude (degrees)')
plt.xlim(lonmin-0.05*abs(lonmax-lonmin), lonmax+0.05*abs(lonmax-lonmin))
plt.ylim(latmin-0.05*abs(latmax-latmin),latmax+0.05*abs(latmax-latmin))
col = plt.colorbar()
col.ax.set_ylabel('Counts')
#plt.scatter(highest_density_coords[:,0],\
#highest_density_coords[:,1],c='orange', s=10)
plt.scatter(new_coord[0], new_coord[1],c='r', s=30)
plt.scatter(old_coord[0], old_coord[1],c='g', s=30)
fig2.savefig("min_density{}.png".format(counter))
fig2.clf()
# Assign new values.
use_old_path = False
grad_ideal = grad_check1
avg_ideal = H_avg1
else:
#RESET values!
#remove new coordinate and replace with old coordinate
coords = list(coords)
del coords[-1]
coords = np.asarray(coords)
#place the old coordinate back into the set of coordinates
coords = np.append(coords, [old_coord], axis=0)
#remove new path and replace it with the old set!
paths = list(paths)
del paths[-1]
paths = list(np.append(paths, [old_path], axis=0))
use_old_path = True
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
if not use_old_path:
# Calculate the combinations of coordinates from real points, to high points!
high_point_coord_combs = [np.vstack([[coord1[0],coord1[1],coord2[0],\
coord2[1]] for coord1 in highest_density_coords for coord2 in coords])]
#print "high point coordinate combitions:", high_point_coord_combs
pool = mp.Pool()
high_point_coords = pool.map(fast_dists, high_point_coord_combs)
pool.close()
pool.join()
high_point_coords = np.vstack(np.asarray(\
list(itertools.chain(*high_point_coords))))
high_point_coords = high_point_coords[high_point_coords[:,0].argsort()]
# t1 = datetime.datetime.now()
# print "fancy index sorting time: ", t1-t0
# SELECT 20% OF THESE LOWEST DISTANCE COORDINATES FROM HIGH DENSITY POINTS!
point_index = int(0.2*len(high_point_coords))
high_point_coords = high_point_coords[:point_index]
# Create unique list of coordinates high_point_coords and remove distances.
high_point_coords = np.column_stack((high_point_coords[:,1],
high_point_coords[:,2]))
b = np.ascontiguousarray(high_point_coords).view(np.dtype\
((np.void, high_point_coords.dtype.itemsize * \
high_point_coords.shape[1])))
_, idx = np.unique(b, return_index=True)
high_point_coords = np.unique(b).view(high_point_coords.dtype)\
.reshape(-1, high_point_coords.shape[1])
# Find random high density coord. This is the old coordinate to remove.
check_coord = high_point_coords[random.randint(0,len(high_point_coords)-1)]
if check_coord in coords:
old_coord = check_coord
elif not use_old_path:
check_coord = None
find_it = []
counter+=1
print "counter2:",counter2
t1 = datetime.datetime.now()
print "That loop took: ", t1-t0 | gpl-3.0 |
petosegan/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit to recover a sparse signal from a noisy
measurement encoded with a dictionary.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
fishstamp82/moltools | moltools/read_dal.py | 1 | 45486 | #!/usr/bin/env python
__all__ = [ 'read_beta_hf_string' ]
import os,sys, re, argparse, ctypes, multiprocessing, functools
import numpy as np
import math as m
#from particles import *
from matplotlib import pyplot as plt
from .molecules import Atom
from .template import Template
try:
from applequist.gaussian import *
except ImportError:
pass
a0 = 0.52917721092
lab = [ "X", "Y", "Z"]
charge_dic = {"H1": 1.0 ,"H2":1.0 , "C1":6.0, "C7":6.0, "H3":1.0,
"H4":1.0, "H6": 1.0, "H8":1.0,
"H9":1.0, "H10": 1.0, "H12":1.0,
"O5":8.0, "O11": 8.0,
"H": 1.0, "C": 6.0, "N": 7.0, "O": 8.0, "S": 16.0}
mass_dict = {"H": 1.008, "C": 6.0, "N": 7.0, "O": 15.999, "S": 16.0}
freq_dict = {"0.0": "static","0.0238927": "1907_nm", "0.0428227" : "1064_nm",
"0.0773571" : "589_nm" }
allowed_elements = ( 'H', 'O' )
def polar_to_cartesian( r, tau, theta):
x, y, z = r* np.sin( theta )*np.cos( tau ) \
, r* np.sin( theta )*np.sin( tau ) \
, r* np.cos( theta )
return x , y , z
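# Worked example (sketch only): r = 1, tau = 0, theta = pi/2 maps onto the
# positive x-axis, so polar_to_cartesian(1.0, 0.0, np.pi/2) returns
# (1.0, 0.0, ~6.1e-17), i.e. (1, 0, 0) up to floating-point round-off.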
def write_related( args ):
if args.xyz.endswith(".pdb"):
name = args.xyz.split(".")[0] + "_" + str(args.waters) + ".mol"
waters = molecules.Water.read_waters( args.xyz ,
in_AA = args.xAA,
out_AA = args.oAA,
N_waters = args.waters )
elif args.xyz.endswith( ".xyz" ):
name = args.xyz.split(".")[0] + ".mol"
f_ = open( name , "w" )
if args.oAA:
str_ = "Angstrom"
else:
str_ = ""
f_.write( "ATOMBASIS\n\nComment\nmolecules.Atomtypes=2 Charge=0 Nosymm %s\n" %str_)
if not args.wat:
"Can't write to .mol file, didn't read water molecules"
raise SystemExit
hCnt = len(waters) * 2
oCnt = len(waters)
f_.write( "Charge=1.0 molecules.Atoms=%d Basis=cc-pVDZ\n" % hCnt)
for i in waters:
for j in i:
if j.element == "H":
f_.write( "%s %.5f %.5f %.5f\n" %( j.element, j.x, j.y, j.z ))
f_.write( "Charge=8.0 molecules.Atoms=%d Basis=cc-pVDZ\n" % oCnt)
for i in waters:
for j in i:
if j.element == "O":
f_.write( "%s %.5f %.5f %.5f\n" %( j.element, j.x, j.y, j.z ))
print "Finished writing mol files %s" %name
raise SystemExit
def run_argparse( args ):
A = argparse.ArgumentParser( )
# ----------------------------
# GENERIC VARIABLES
# ----------------------------
#
A.add_argument("-dal", type= str, default = 'hflin' )
A.add_argument("-mol", type= str, default = 'tip3p' )
A.add_argument( "-dist", action = "store_true", default = False )
# ----------------------------
# READ ALPHA
# ----------------------------
A.add_argument( "-alpha", type = str, )
# ----------------------------
# BETA ANALYSIS RELATED
# ----------------------------
A.add_argument( "-beta_analysis_par", action = "store_true", default = False )
A.add_argument( "-beta_analysis", action = "store_true", default = False )
A.add_argument( "-freq", type = str, default = "0.0",
choices = ["0.0", "0.0238927", "0.0428227", "0.0773571"] )
A.add_argument( "-R", type = float, default = 0.000001)
A.add_argument( "-beta",dest="beta", type = str,help="File that contains QUADRATIC response output with hyperpolarizabilities" )
A.add_argument( "-in_AA", action = "store_true", default = False )
A.add_argument( "-out_AA", action = "store_true", default = False )
A.add_argument( "-basis", type= str, nargs = '*', default = "ANOPVDZ" )
A.add_argument( "-beta_dal", type= str, default = "hfqua_" )
A.add_argument( "-Ncpu", type= int, default = "4" )
A.add_argument( "-N_waters", type= int, default = 15 )
A.add_argument( "-model", default = "tip3p" )
# ----------------------------
# ALPHA ANALYSIS RELATED
# ----------------------------
#
A.add_argument( "-alpha_analysis", action = "store_true", default = False )
A.add_argument( "-nums", type = str, nargs = '*',
default = map(str, range(1,10)) )
A.add_argument( "-x", type = str, default = ["nums"],
choices = ["snaps", "nums", "freqs"] )
A.add_argument( "-y", type = str, default = ["yy"],
choices = ["xx", "yy", "zz", "mean", "aniso"] )
A.add_argument( "-freqs", type = str, nargs = '*',
default = ['0.0', "0.0238927", "0.0428227", "0.0773571"]
)
A.add_argument( "-comps", type = str, nargs = '*', default = ["xx", "yy", "zz"],
choices = ["xx", "yy", "zz", "mean", "aniso"])
A.add_argument( "-snaps", type = str, nargs = '*',
default = map(str, range(10)) )
A.add_argument( "-eps_out", type = str )
A.add_argument( "-template_freq", type = str,
choices = ['0.0', "0.0238927", "0.0428227", "0.0773571"]
)
A.add_argument( "-hdf", action = "store_true", default = False )
# ----------------------------
# RELATED TO PLOT WINDOW APPEARANCE
# ----------------------------
#
A.add_argument( "-ymin", type = float, default = -0.10 )
A.add_argument( "-ymax", type = float, default = 0.10 )
# ----------------------------
# QM GENERATION RELATED
# ----------------------------
A.add_argument( "-qm_generation", action = "store_true", default = False )
# ----------------------------
# QM ANALYSIS RELATED
# ----------------------------
A.add_argument( "-qm_analysis", action = "store_true", default = False )
# ----------------------------
# QMMM GENERATION RELATED
# ----------------------------
A.add_argument( "-qmmm_generation", action = "store_true", default = False )
A.add_argument( "-potstyle", default = "QMMM",
choices = ["QMMM", "PEQM"])
A.add_argument( "-qm_waters", type = int, nargs = '*',
default = [1] )
A.add_argument( "-mm_waters", type = int, nargs = '*',
default = [1] )
A.add_argument( "-file_type", type = str, default = "pdb" )
A.add_argument( "-tname", type = str, default = "TIP3P" )
A.add_argument( "-tmethod", type = str, default = "HF" )
A.add_argument( "-tbasis", type = str, default = "ANOPVDZ" )
#also share same arguments -snaps -freqs with -alpha_analysis
# ----------------------------
# QMMM ANALYSIS RELATED
# ----------------------------
A.add_argument( "-qmmm_analysis", action = "store_true", default = False )
A.add_argument( "-n_qm", type = str, nargs = '*',
default = map(str, range(1,10)) )
A.add_argument( "-n_mm", type = str, nargs = '*',
default = map(str, range(1,101)) )
A.add_argument( "-potfreqs", type = str, nargs = '*',
default = ["0.0", "0.0238927", "0.0428227", "0.0773571"] )
# ----------------------------
# WRITE RELATED pdb to mol generation RELATED
# ----------------------------
A.add_argument("-waters", type = int , default = 4, help = "how many waters to take closest to center atom, default: 4")
A.add_argument("-v","--verbose", action='store_true' , default = False)
A.add_argument("-write", nargs='*', default = [], help = "Supply any which files to write from a selection: pot, xyz" )
A.add_argument( "-xyz", dest="xyz", type = str, help = 'Coordinate file with water molecules for the output .pot file. [ xyz , pdb ]')
A.add_argument( "-xAA", default = False ,action='store_true',
help = 'Default coordinate type in AA or AU in -x input water coordinate file, default: False ')
A.add_argument( "-oAA", default = False, action='store_true' , help='Default coordinate type AA or AU for -op output potential file, default: "AU"' )
A.add_argument( "-tw", type = float, default = 0.0 )
A.add_argument( "-wat", action = 'store_true' , default= True )
a = A.parse_args( args[1:] )
return a
def is_ccsd( filename):
    """Return True if filename, a DALTON .out file, contains a quadratic CCSD calculation."""
pat_ccsd = re.compile(r'FINAL CCSD RESULTS FOR THE FIRST HYPERPOLARIZABILITIES')
for i in open(filename).readlines():
if pat_ccsd.search( i ):
return True
return False
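# Hedged usage sketch (the file name below is an assumption, not from this module):
#   if is_ccsd( "water_ccsd_qua.out" ):
#       atoms, dip, alpha, beta = read_beta_ccsd( open( "water_ccsd_qua.out" ).read() )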
def read_alpha_hf( fstr, freq = '0.0', in_AA = False, freqs = 1 ):
    # If freqs > 1, returns a list with one alpha tensor per frequency
#
# Reading in Alpha tensor
fre = freq[0:7]
pat_alpha = re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*= *(-?\d*\.{1}\d+D*-?\+?\d*)')
pat_new_freq = re.compile(r'FREQUENCY.*SECOND ORDER')
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
    # For every new frequency block encountered, append a fresh 3x3 array to freqlist
    # and fill it as alpha components are parsed; the first array corresponds to the
    # first frequency.
freqlist = None
lines = fstr.split('\n')
for i in lines:
if pat_new_freq.search( i ):
if freqlist is None:
freqlist = []
freqlist.append( np.zeros( (3,3 )) )
if pat_alpha.search( i ):
matched = pat_alpha.search(i).groups()
if "D" in matched[2]:
frac = float( matched[2].replace("D","E") )
else:
frac = float( matched[2] )
A = matched[0]
B = matched[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
freqlist[-1][lab.index( A ), lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
freqlist[-1][lab.index( B ), lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
freqlist[-1][lab.index( B ), lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
freqlist[-1][lab.index( B ), lab.index( A ) ] = frac
if freqs > 1:
return freqlist
return alpha
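# Hedged usage sketch ('lin.out' is an assumed, illustrative path): with freqs > 1
# read_alpha_hf returns a list of 3x3 arrays, otherwise a single 3x3 array.
def _example_read_alpha_hf( path = 'lin.out', n_freqs = 3 ):
    with open( path ) as f:
        alphas = read_alpha_hf( f.read(), freqs = n_freqs )
    # isotropic polarizability for each frequency
    return [ np.trace( a ) / 3.0 for a in alphas ]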
def read_energy( fname, calctype = 'HF' ):
"""Return the energy from dalton .out file fname"""
for line in open(fname).readlines():
if re.compile(r'.*Final.*energy').match(line):
return line.split()[-1]
def read_alpha_ccsd( fstr ):
mol_dip = np.zeros(3)
alpha = np.zeros( [3,3])
beta = np.zeros( [3,3,3])
beta_dict = {}
atoms = []
lab = ["X", "Y", "Z"]
pat_dipole = re.compile(r'Total Molecular Dipole Moment')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
pat_alpha= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*')
pat_beta= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*([XYZ])DIPLEN')
lines = fstr.split('\n')
    # Reading in Alpha
for i in lines:
if pat_alpha.search( i ):
if len(i.split()) < 8:
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.search(i).groups(1)[0]
B = pat_alpha.search(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
return alpha
def read_beta_ccsd( fstr ):
mol_dip = np.zeros(3)
alpha = np.zeros( [3,3])
beta = np.zeros( [3,3,3])
beta_dict = {}
atoms = []
lab = ["X", "Y", "Z"]
pat_dipole = re.compile(r'Total Molecular Dipole Moment')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
pat_alpha= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*')
pat_beta= re.compile(r'([XYZ])DIPLEN.*([XYZ])DIPLEN.*([XYZ])DIPLEN')
# Reading in dipole
lines = fstr.split('\n')
for i in range(len( lines )):
if pat_dipole.search( lines[i] ):
mol_dip[0] = lines[i+5].split()[1]
mol_dip[1] = lines[i+6].split()[1]
mol_dip[2] = lines[i+7].split()[1]
    # Reading in Alpha
for i in lines:
if pat_alpha.search( i ):
if len(i.split()) < 8:
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.search(i).groups(1)[0]
B = pat_alpha.search(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
#For Beta
for i in lines:
if pat_beta.search( i ):
if len(i.split()) >8:
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
lab1 = pat_beta.search(i).groups(1)[0]
lab2 = pat_beta.search(i).groups(1)[1]
lab3 = pat_beta.search(i).groups(1)[2]
beta_dict[ "".join( [lab1 + lab2 + lab3]) ] = frac
for i, l1 in enumerate(lab):
for j, l2 in enumerate(lab):
for k, l3 in enumerate(lab):
beta[i, j, k] = beta_dict[ l1 + l2 + l3 ]
return atoms, mol_dip, alpha , beta
def read_beta( fstr, freq = "0.0", in_AA = False, out_AA = False ):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
# Reading in Beta tensor
fre = str("%.5f" % float(freq))
lab = ['X', 'Y', 'Z', ]
pat_beta = re.compile(r'@ B-freq')
lines = fstr.split('\n')
for i in lines:
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
return beta
def read_beta_hf( fstr, freq = "0.0", in_AA = False, out_AA = False ):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_Q = re.compile(r'Total charge of the molecule')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
#Special xyz hack for camb3lyp output from akka dalton to find atoms
pat_akka_xyz = re.compile(r'^\s*(\w+)\s+:\s+\d\s+x\s+(-*\d*\.+\d+)\s+\d\s+y\s+(-*\d*\.+\d+)\s+\d\s+z\s+(-*\d*\.+\d+) *$')
pat_labels_xyz = re.compile(r'^\s*(\S+-+\S+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
# Reading in dipole and charge
lines = fstr.split( '\n' )
for i in lines:
if pat_Q.search( i ):
Q = float(i.split()[-1])
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_akka_xyz.match(i):
f = pat_akka_xyz.match(i).groups()
matched = pat_akka_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_labels_xyz.match(i):
f = pat_labels_xyz.match(i).groups()
matched = pat_labels_xyz.match(i).groups()
lab = matched[0]
if len(lab.split('-')) == 4:
element = "H"
else:
element = lab.split('-')[2][0]
kwargs = { "AA": in_AA, "element" : element, "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
    #Compute the center of nuclear charge (coc) and the nuclear dipole relative to it
coc = sum([ x.r * charge_dic[x.element] for x in atoms ]) /\
sum([ charge_dic[x.element] for x in atoms ])
for i in atoms:
nuc_dip += charge_dic[ i.element ] * (i.r - coc )
if in_AA:
# Make sure center of charge is in Atomic units to give correct electronic dipole
coc /= a0
    # Reading in Alpha and Beta tensors
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in lines:
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in lines:
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
N_el = sum([charge_dic[at.element] for at in atoms]) - Q
tot_dip = el_dip - coc * N_el
return atoms, tot_dip, alpha , beta
def read_props_qmmm( file_, freq = "0.0", in_AA = False ):
    """ Same as read_beta_hf, but skips coordinates whose element is not in allowed_elements
"""
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
# Reading in dipole
for i in open( file_ ).readlines():
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
if matched[0] not in allowed_elements:
continue
kwargs = { "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = molecules.Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
for i in atoms:
nuc_dip[0] += charge_dic[ i.element ] * i.x
nuc_dip[1] += charge_dic[ i.element ] * i.y
nuc_dip[2] += charge_dic[ i.element ] * i.z
    # Reading in Alpha and Beta tensors
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in open( file_ ).readlines():
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in open( file_ ).readlines():
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
if in_AA:
nuc_dip /= a0
tot_dip = nuc_dip - el_dip
return atoms, nuc_dip - el_dip, alpha , beta
def main():
"""
Program reads alpha and beta tensor and dipole moment from DALTON output
"""
args = run_argparse( sys.argv )
if args.alpha:
a = read_alpha( args.alpha, )
if args.beta_analysis:
beta_analysis(args, basis = args.basis,
dal = args.beta_dal, in_AA = args.in_AA,
out_AA = args.out_AA,
ncpu = args.Ncpu,
N_waters = args.N_waters)
if args.beta_analysis_par:
run_beta_analysis_par( N_waters = args.N_waters,
ncpu = args.Ncpu,
model = args.model )
if args.alpha_analysis:
alpha_analysis(args)
if args.qm_generation:
qm_generation(
qm_waters = args.qm_waters,
basis = args.basis
)
if args.qmmm_generation:
qmmm_generation(
qm_waters = args.qm_waters,
mm_waters = args.mm_waters,
potfreqs = args.potfreqs,
potstyle = args.potstyle,
basis = args.basis)
if args.qm_analysis:
qm_analysis( in_AA = args.in_AA,
out_AA = args.out_AA )
if args.qmmm_analysis:
qmmm_analysis( args )
if args.write:
write_related( args )
def read_dipole( file_, freq = "0.0", in_AA = False, out_AA = False ):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_Q = re.compile(r'Total charge of the molecule')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
#Special xyz hack for camb3lyp output from akka dalton to find atoms
pat_akka_xyz = re.compile(r'^\s*(\w+)\s+:\s+\d\s+x\s+(-*\d*\.+\d+)\s+\d\s+y\s+(-*\d*\.+\d+)\s+\d\s+z\s+(-*\d*\.+\d+) *$')
pat_labels_xyz = re.compile(r'^\s*((?!-)\S+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
# Reading in dipole and charge
for i in open( file_ ).readlines():
if pat_Q.search( i ):
Q = float(i.split()[-1])
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_akka_xyz.match(i):
f = pat_akka_xyz.match(i).groups()
matched = pat_akka_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_labels_xyz.match(i):
f = pat_labels_xyz.match(i).groups()
matched = pat_labels_xyz.match(i).groups()
lab = matched[0]
if len(lab.split('-')) == 4:
element = "H"
else:
try:
element = lab.split('-')[2][0]
except IndexError as e:
                    warnings.warn( 'Unexpected atom label pattern while parsing .xyz line in read_dipole' )
continue
kwargs = { "AA": in_AA, "element" : element, "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
    #Compute the center of nuclear charge (coc) and the nuclear dipole relative to it
coc = sum([ x.r * charge_dic[x.element] for x in atoms ]) /\
sum([ charge_dic[x.element] for x in atoms ])
for i in atoms:
nuc_dip += charge_dic[ i.element ] * (i.r - coc )
if in_AA:
# Make sure center of charge is in Atomic units to give correct electronic dipole
coc /= a0
N_el = sum([charge_dic[at.element] for at in atoms]) - Q
tot_dip = el_dip - coc * N_el
return tot_dip
def read_props_qmmm( file_, freq = "0.0", in_AA = False ):
    """ Same as read_beta_hf, but skips coordinates whose element is not in allowed_elements
"""
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
# Reading in dipole
for i in open( file_ ).readlines():
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
if matched[0] not in allowed_elements:
continue
kwargs = { "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
for i in atoms:
nuc_dip[0] += charge_dic[ i.element ] * i.x
nuc_dip[1] += charge_dic[ i.element ] * i.y
nuc_dip[2] += charge_dic[ i.element ] * i.z
    # Reading in Alpha and Beta tensors
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in open( file_ ).readlines():
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in open( file_ ).readlines():
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
if in_AA:
nuc_dip /= a0
tot_dip = nuc_dip - el_dip
return atoms, nuc_dip - el_dip, alpha , beta
def read_beta_hf( file_, freq = "0.0", in_AA = False, out_AA = False ):
with open( file_ ) as f:
return read_beta_hf_string( f.read(), freq = freq,
in_AA = in_AA, out_AA = out_AA )
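# Hedged usage sketch (the file name is an assumption): the wrapper above simply reads
# the file and delegates to read_beta_hf_string.
def _example_read_beta_hf( path = 'hfqua_water.out' ):
    atoms, tot_dip, alpha, beta = read_beta_hf( path, freq = "0.0" )
    # beta is a 3x3x3 numpy array in Hartree atomic units
    return beta.shape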
def read_beta_hf_string( string_, freq = "0.0", in_AA = False, out_AA = False, akka = False):
nuc_dip = np.zeros(3)
el_dip = np.zeros(3)
alpha = np.zeros([3,3])
beta = np.zeros([3,3,3])
tmp = []
atoms = []
missing = {}
exists = {}
lab = ["X", "Y", "Z"]
pat_Q = re.compile(r'Total charge of the molecule')
pat_xyz = re.compile(r'^\s*(\w+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
pat_pol = re.compile(r'([XYZ])DIPLEN.*total.*:')
#Special xyz hack for camb3lyp output from akka dalton to find atoms
if akka:
pat_akka_xyz = re.compile(r'^\s*(\w+)\s+:\s+\d\s+x\s+(-*\d*\.+\d+)\s+\d\s+y\s+(-*\d*\.+\d+)\s+\d\s+z\s+(-*\d*\.+\d+) *$')
else:
pat_akka_xyz = re.compile(r'^(?!a)a')
pat_labels_xyz = re.compile(r'^\s*((?!-)\S+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+)\s+(-*\d*\.+\d+) *$')
# Reading in dipole and charge
for i in string_.split('\n'):
if pat_Q.search( i ):
Q = float(i.split()[-1])
if pat_xyz.match(i):
f = pat_xyz.match(i).groups()
matched = pat_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_akka_xyz.match(i):
f = pat_akka_xyz.match(i).groups()
matched = pat_akka_xyz.match(i).groups()
#Skip coordinates in out file that are for MM region from QMMM
kwargs = { "AA": in_AA, "element" : matched[0], "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
elif pat_labels_xyz.match(i):
f = pat_labels_xyz.match(i).groups()
matched = pat_labels_xyz.match(i).groups()
lab = matched[0]
if len(lab.split('-')) == 4:
element = "H"
else:
try:
element = lab.split('-')[2][0]
except IndexError as e:
                    warnings.warn( 'Unexpected atom label pattern while parsing .xyz line in read_beta_hf_string' )
continue
kwargs = { "AA": in_AA, "element" : element, "x" : matched[1],
"y" : matched[2], "z" : matched[3] }
tmpAtom = Atom( **kwargs )
atoms.append( tmpAtom )
if pat_pol.search(i):
if pat_pol.search(i).group(1) == "X":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[0] += frac
if pat_pol.search(i).group(1) == "Y":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[1] += frac
if pat_pol.search(i).group(1) == "Z":
try:
if "D" in i.split()[3]:
frac = float(i.split()[3].replace("D","E"))
else:
frac = float(i.split()[3])
except IndexError:
if "D" in i.split()[2]:
frac = float( i.split()[2].strip(":").replace("D","E"))
else:
frac = float( i.split()[2].strip(":"))
el_dip[2] += frac
remove = []
for ind, at in enumerate(atoms[:-1]):
for other in atoms[ind+1:]:
if at.equal( other ):
remove.append( other )
for each in remove:
atoms.remove( each )
    #Compute the center of nuclear charge (coc) and the nuclear dipole relative to it
coc = sum([ x.r * charge_dic[x.element] for x in atoms ]) /\
sum([ charge_dic[x.element] for x in atoms ])
for i in atoms:
nuc_dip += charge_dic[ i.element ] * (i.r - coc )
if in_AA and not out_AA:
# Make sure center of charge is in Atomic units to give correct electronic dipole
coc /= a0
    # Reading in Alpha and Beta tensors
fre = str("%.5f" % float(freq))
pat_alpha = re.compile(r'@.*QRLRVE.*([XYZ])DIPLEN.*([XYZ])DIPLEN.*%s' %fre)
alpha = np.zeros( [3,3,] )
lab = ['X', 'Y', 'Z', ]
for i in string_.split('\n'):
if pat_alpha.match( i ):
try:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].replace("D","E") )
else:
frac = float( i.split()[-1] )
except IndexError:
if "D" in i.split()[-1]:
frac = float( i.split()[-1].strip("=").replace("D","E") )
else:
frac = float( i.split()[-1].strip("=") )
A = pat_alpha.match(i).groups(1)[0]
B = pat_alpha.match(i).groups(1)[1]
alpha[ lab.index( A ) , lab.index( B ) ] = frac
if A == "X" and B == "Y":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "X" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
if A == "Y" and B == "Z":
alpha[ lab.index( B ) , lab.index( A ) ] = frac
pat_beta = re.compile(r'@ B-freq')
for i in string_.split('\n'):
if pat_beta.match(i):
try:
if i.split()[7].lstrip("beta") in exists:
continue
exists[ i.split()[7].lstrip("beta") ] = float(i.split()[9] )
except ValueError:
a, b, c = i.split()[9].lstrip("beta").strip("()").split(",")
if i.split()[7].lstrip("beta") in missing:
continue
missing[ i.split()[7].lstrip("beta") ] = "(%s;%s,%s)"%(a,b,c)
for i in range(3):
for j in range(3):
for k in range(3):
try:
beta[i][j][k] = exists[ "(%s;%s,%s)" %(lab[i],lab[j],lab[k])]
except KeyError:
beta[i][j][k] = exists[ missing["(%s;%s,%s)"%(lab[i],lab[j],lab[k]) ] ]
N_el = sum([charge_dic[at.element] for at in atoms]) - Q
tot_dip = -el_dip + coc * N_el
return atoms, tot_dip, alpha , beta
if __name__ == '__main__':
main()
| mit |
zonca/healpy | doc/create_images.py | 7 | 1087 | import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
SIZE = 400
DPI = 60
m = np.arange(hp.nside2npix(32))
hp.mollview(m, nest = True, xsize=SIZE, title='Mollview image NESTED')
plt.savefig('static/moll_nside32_nest.png', dpi=DPI)
hp.mollview(m, nest = False, xsize=SIZE, title='Mollview image RING')
plt.savefig('static/moll_nside32_ring.png', dpi=DPI)
wmap_map_I = hp.read_map('../healpy/test/data/wmap_band_imap_r9_7yr_W_v4.fits')
hp.mollview(wmap_map_I, coord=['G','E'], title='Histogram equalized Ecliptic', unit='mK', norm='hist', min=-1,max=1, xsize=SIZE)
hp.graticule()
plt.savefig('static/wmap_histeq_ecl.png', dpi=DPI)
mask = hp.read_map('../healpy/test/data/wmap_temperature_analysis_mask_r9_7yr_v4.fits').astype(np.bool)
wmap_map_I_masked = hp.ma(wmap_map_I)
wmap_map_I_masked.mask = np.logical_not(mask)
LMAX = 1024
cl = hp.anafast(wmap_map_I_masked.filled(), lmax=LMAX)
ell = np.arange(len(cl))
plt.figure()
plt.plot(ell, ell * (ell+1) * cl)
plt.xlabel('ell'); plt.ylabel('ell(ell+1)cl'); plt.grid()
plt.savefig('static/wmap_powspec.png', dpi=DPI)
| gpl-2.0 |
ChristinaLK/chtc_sheets | chtc_sheets.py | 1 | 12833 | # Python code to extract user/project/hours used information
# from second half of CHTC daily (weekly/monthly/yearly) reports
# input is a single tsv file in the "data" directory
# output is written to "reports" directory
# can be run in the ipython notebook (replace the "filename" string in cell 4)
# or on the command line as:
# $python chtc_sheets data/spreadsheet.tsv
# dictionary for assigning projects -> departments
dept_dictionary = {
'Economics_Roys':'Economics',
'BMRB':'Biochemistry',
'ECE_VanVeen':'Electrical and Computer Engineering',
'CMS':'Physics',
'Physics_Knezevic':'Physics',
'Statistics_YazhenWang':'Statistics',
'Cs_Ron':'Computer Sciences',
'Math_Boston':'Mathematics',
'IceCube':'Physics',
'WID_Biology_Vetsigian':'Bacteriology',
'Economics_Gandhi':'Economics',
'Statistics_Tsui':'Statistics',
'ERC':'Mechanical Engineering',
'Zoology_Lee':'Zoology',
'MaterialScience_Morgan':'Materials Science and Engineering',
'OSG':'off-campus',
'Economics_Gregory':'Economics',
'CAE':'multi-departmental',
'Statistics_Ane':'Statistics',
'WID_Biology_Roy':'Biostatistics and Medical Informatics',
'Biostat_Broman':'Biostatistics and Medical Informatics',
'LMCG':'Genetics',
'WID':'multi-departmental',
'MedPhysics_Bednarz':'Medical Physics',
'BME_Williams':'Biomedical Engineering',
'SSEC':'Atmospheric Sciences',
'Botany_Spalding':'Botany',
'Biochem_Mitchell':'Biochemistry',
'ChemE_Mavrikakis':'Chemical and Biological Engineering',
'BMI_Craven':'Biostatistics and Medical Informatics',
'Astronomy_Heinz':'Astronomy',
'MSE_Szlufarska':'Materials Science and Engineering',
'Cs_Re':'Computer Sciences',
'AnimalSciences_Berres':'Animal Sciences',
'Purdue':'off-campus',
'Genetics_Pool':'Genetics',
'Physics_Friesen':'Physics',
'Chemistry_Schmidt':'Chemistry',
'Chemistry_Yethiraj':'Chemistry',
'MechE_Trujillo':'Mechanical Engineering',
'Physics_Bai':'Physics',
'EarthCube':'Geoscience',
'Economics_Freyberger':'Economics',
'CS':'Computer Sciences',
'Ece_Hagness':'Electrical and Computer Engineering',
'AnimalSciences_Wu':'Animal Sciences',
'CHTC':'Computer Sciences',
'Entomology_Zhu':'Entomology',
'Medicine_Johnson':'Medicine',
'ChE_dePablo':'Chemical and Biological Engineering',
'CEE_Loheide':'Civil and Environmental Engineering',
'Botany_Givnish':'Botany',
'Genetics_Payseur':'Genetics',
'MechE_Rutland':'Mechanical Engineering',
'Waisman_Alexander':'Psychiatry',
'EngrPhysics_Wilson':'Engineering Physics',
'MSE_Voyles':'Materials Science and Engineering',
'SOAR':'Computer Sciences',
'Chemistry_Cui':'Chemistry',
'Physics_Sarff':'Physics',
'Biostat':'Biostatistics and Medical Informatics',
'Waisman_Center':'multi-departmental',
'Math_Thiffeault':'Mathematics',
'Astronomy_DOnghia':'Astronomy',
'WID_Biology_Vestigian':'Bacteriology',
'Physics_Knutson':'Physics',
'Physics_Perkins':'Physics',
'Oncology_Hill':'Oncology',
'UWEC_Ma':'off-campus',
'Oncology_Sugden':'Oncology',
'ECE_Ramanathan':'Electrical and Computer Engineering',
'Statistics_Zhang':'Statistics',
'Geoscience_DeMets':'Geoscience',
'NucEngr_Schmitz':'Engineering Physics',
'Biostat_Wang':'Biostatistics and Medical Informatics',
'MedPhysics_Bender':'Medical Physics',
'Business_Gofman':'Finance',
'Economics_Sorensen':'Economics',
'MedPhysics_Campagnola':'Medical Physics',
'Astronomy_Townsend':'Astronomy',
'Business_Levine':'Finance',
'Cs_Sohi':'Computer Sciences',
'Biochem_Fox':'Biochemistry',
'SmallMolecule_Hoffman':'Oncology',
'Physics_Joynt':'Physics',
'MechE_Kokjohn':'Mechanical Engineering',
'Economics_Kennan':'Economics',
'Cs_Hill':'Computer Sciences',
'Biostat_Zhao':'Biostatistics and Medical Informatics',
'Physics_Forest':'Physics',
'EngrPhysics_Sovinec':'Engineering Physics',
'Chemistry_Coon':'Chemistry',
'Pharmacy_Kwan':'Pharmaceutical Sciences',
'Biostat_Singh':'Biostatistics and Medical Informatics',
'Psychology_Rogers':'Pyschology',
'Statistics_Shao':'Statistics',
'Waisman_Vorperian':'multi-departmental',
'UCSB':'off-campus',
'AnimalSciences_Rosa':'Animal Sciences',
'Math_Spagnolie':'Mathematics',
'Neurology_Hermann':'Neurology',
'EngrPhysics_Anderson':'Engineering Physics',
'GLBRC_WEI':'multi-departmental',
'EngrPhysics_Hegna':'Engineering Physics',
'BMI_Pack':'Biostatistics and Medical Informatics',
'MechE_Thelen':'Mechanical Engineering',
'History_Chowkwanyun':'History',
'Cs_Sankaralingam':'Computer Sciences',
'Statistics_Hanlon':'Statistics',
'other':'other',
'Botany_Sytsma':'Botany',
'EdPsychology_Kaplan':'Educational Psychology',
'UWPlatt_Haasl':'off-campus',
'MedPhysics_Varghese':'Medical Physics',
'Page_Learn':'Biostatistics and Medical Informatics',
'Psychiatry_Tononi':'Psychiatry',
'Zoology_Turner':'Zoology',
'Astronomy_Bershady':'Astronomy',
'Primate_Oconnor':'Pathology and Laboratory Medicine',
'Psychiatry_Koenigs':'Psychiatry',
'OSG-SS':'off-campus',
'Economics_Shi':'Economics',
'Arizona_iPlant':'off-campus',
'WID_LEL':'multi-departmental',
'Bacteriology_Rey':'Bacteriology',
'Botany_Cameron':'Botany',
'Math_Stechmann':'Mathematics',
'Medicine_Pepperell':'Medicine',
'EdPsychology_Steiner':'Educational Psychology',
'BotanyMath_Staff':'multi-departmental',
'Psychiatry_Kalin':'Psychiatry',
'BMI_Gitter':'Biostatistics and Medical Informatics',
'MIR_Mackie':'Medical Physics',
'CEE_Wu':'Civil and Environmental Engineering',
'Geoscience_Feigl':'Geoscience',
'BME_Ashton':'Biomedical Engineering',
'Surgery_Jiang':'Surgery',
'Biochemistry_Denu':'Biochemistry',
'GLBRC_Benton':'multi-departmental',
'Botany_Gilroy':'Botany',
'Astronomy_Tremonti':'Astronomy',
'Math':'Mathematics',
'Biochem':'Biochemistry',
'Economics_Atalay':'Economics',
'CsBanerjee':'Computer Sciences',
'Wempec_Jahns':'Electrical and Computer Engineering',
'Marschfield_Hebbring':'off-campus',
'Bacteriology_McMahon':'Bacteriology',
'Statistics_Yu':'Statistics',
'Psychiatry_Abercrombie':'Psychiatry',
'Radiology_Brace':'Radiology',
'Neurology_Gallagher':'Neurology',
'Geoscience_Cardiff':'Geoscience',
'CEE_hedegaard':'Civil and Environmental Engineering',
'UWMilwaukee':'off-campus',
'Atlas':'Physics',
'MechE_Pfotenhauer':'Mechanical Engineering',
'CEE_GinderVogel':'Civil and Environmental Engineering',
'CEE_Hedegaard':'Civil and Environmental Engineering',
'StatisticsYazhenWang':'Statistics',
'Pathobio_Friedrich':'Pathobiological Sciences',
'Loci_Eliceiri':'Biomedical Engineering',
'ChemBioEngr_Graham':'Chemical and Biological Engineering',
'Oncology_Ahlquist':'Oncology',
'AnimalSciences':'Animal Sciences',
'backfill':'other',
'Statistics_Keles':'Statistics',
'Botany_Graham':'Botany',
'Psychiatry_Cirelli':'Psychiatry',
'UWEC':'off-campus',
'Pathology_Oconnor':'Pathology and Laboratory Medicine',
'CBE_Yin':'Chemical and Biological Engineering',
'Geoscience_Staff':'Geoscience',
'History_Chowkanyun':'History',
'AgAplliedEcon_Grainger':'Agricultural and Applied Economics',
'DoIT':'Computer Sciences',
'MedPhysics_DeWerd':'Medical Physics',
'EdPsychology_Wollack':'Educational Psychology',
'OSG-People':'off-campus',
'GeoDeepDive':'Geoscience',
'SCO_Wiscland':'Geography',
'Bionates_Saha':'Biomedical Engineering',
'AAE_NETS':'Agricultural and Applied Economics',
'ECE_Sethares':'Electrical and Computer Engineering',
'BME_Chesler':'Biomedical Engineering',
'MechE_Qian':'Mechanical Engineering',
'Physics_Barger':'Physics',
'MechE_Negrut':'Mechanical Engineering',
'Astronomy_Stanimirovic':'Astronomy',
}
# dictionary for assigning depts -> colleges
college_dictionary = {
'Statistics':'Letters and Sciences',
'Genetics':'Agricultural and Life Sciences',
'Biochemistry':'Agricultural and Life Sciences',
'Pharmaceutical Sciences':'Pharmacy',
'Psychiatry':'Medicine and Public Health',
'Radiology':'Medicine and Public Health',
'Civil and Environmental Engineering':'Engineering',
'Medicine':'Medicine and Public Health',
'Mathematics':'Letters and Sciences',
'multi-departmental':'other',
'Finance':'Business',
'Mechanical Engineering':'Engineering',
'Biomedical Engineering':'Engineering',
'Entomology':'Agricultural and Life Sciences',
'Engineering Physics':'Engineering',
'Electrical and Computer Engineering':'Engineering',
'other':'other',
'Biostatistics and Medical Informatics':'Medicine and Public Health',
'Astronomy':'Letters and Sciences',
'Medical Physics':'Medicine and Public Health',
'Economics':'Letters and Sciences',
'Computer Sciences':'Letters and Sciences',
'Materials Science and Engineering':'Engineering',
'Agricultural and Applied Economics':'Agricultural and Life Sciences',
'Pathology and Laboratory Medicine':'Medicine and Public Health',
'Bacteriology':'Agricultural and Life Sciences',
'Chemistry':'Letters and Sciences',
'Physics':'Letters and Sciences',
'Geoscience':'Letters and Sciences',
'History':'Letters and Sciences',
'Botany':'Letters and Sciences',
'Oncology':'Medicine and Public Health',
'Atmospheric Sciences':'Letters and Sciences',
'Animal Sciences':'Agricultural and Life Sciences',
'Educational Psychology':'Education',
'off-campus':'off-campus',
'Chemical and Biological Engineering':'Engineering',
'Pyschology':'Letters and Sciences',
'Neurology':'Medicine and Public Health',
'Zoology':'Letters and Sciences',
'Pathobiological Sciences':'Veterinary Medicine',
'Surgery':'Medicine and Public Health',
'Geography':'Letters and Sciences'
}
import sys
import pandas
#set filename and extract useful pieces for creating output file names
filename = sys.argv[1]
base_fn = filename.split('/')[1].split('.')[0]
date_fn = base_fn.split('_')[1]+'_'+base_fn.split('_')[2]
#read in data and a list of headers
data = pandas.read_csv(filename, delimiter='\t', skiprows=[1,2], thousands=',')
headers = list(data.columns.values)
date = headers[1]
#extract data
#(usernames, depts, colleges, #submit point#, total hours, broken down into htc, hpc, osg, non-chtc)
# as individual lists
# list of usernames
def username(row):
if row[date].split('_')[0] == 'nu':
return row[date].split('_')[0]+"_"+row[date].split('_')[1]
else:
return row[date].split('_')[0]
usernames = list(data.apply(username, axis=1))
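# Hedged illustration (the sample values are assumptions): a cell such as
# "nu_alice_sub1" maps to "nu_alice", while "bob_sub1" maps to "bob".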
# list of Projects
projects = list(data["Project"])
# list of departments
def dept(row):
proj_dept = row['Project']
return dept_dictionary[proj_dept]
depts = list(data.apply(dept, axis=1))
# list of colleges
def college(row):
proj_dept = row['Project']
dept = dept_dictionary[proj_dept]
return college_dictionary[dept]
colleges = list(data.apply(college, axis=1))
# list of submit points
#def submit(row):
# return row[date].split('_')[-1]
#submit_point = list(data.apply(submit, axis=1))
# lists of hours (non chtc, chtc (htc), slurm (hpc), and osg)
slurm = list(data["SLURM"])
chtc = list(data["CHTC"])
osg = list(data["OSG"])
total = list(data["Total"])
def nonchtc(row):
return row['Total'] - row['CHTC'] - row['SLURM'] - row['OSG']
outside = list(data.apply(nonchtc, axis=1))
#make user_data table from all lists above
user_data = pandas.DataFrame.from_items( [('username', usernames), ('project_name',projects),
('dept', depts), ('college', colleges),
('total', total), ('htc', chtc),
('hpc', slurm), ('osg', osg), ('other', outside)])
# save all reorganized data
cleaned_fn = 'reports/'+date_fn+'_cleaned.tsv'
cleaned_file = open(cleaned_fn, 'w')
user_data.to_csv(cleaned_file, sep='\t', index=False)
cleaned_file.close()
# get chtc users with more than 0 hours
# this includes all people with hours on chtc supported hardware
# (which necessarily includes everyone submitting from a CHTC submit point)
chtc_users = user_data[((user_data.osg > 0) |
(user_data.htc > 0) |
(user_data.hpc > 0)) & (user_data.total > 0)]
# save chtc specific data
chtc_fn = 'reports/'+date_fn+'_chtc.tsv'
chtc_file = open(chtc_fn, 'w')
chtc_users.to_csv(chtc_file, sep='\t', index=False)
chtc_file.close()
| mit |
psi4/DatenQM | qcfractal/interface/collections/reaction_dataset.py | 1 | 33261 | """
QCPortal Database ODM
"""
import itertools as it
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import pandas as pd
from qcelemental import constants
from ..models import Molecule, ProtoModel
from ..util import replace_dict_keys
from .collection_utils import nCr, register_collection
from .dataset import Dataset
if TYPE_CHECKING: # pragma: no cover
from .. import FractalClient
from ..models import ComputeResponse
class _ReactionTypeEnum(str, Enum):
"""Helper class for locking the reaction type into one or the other"""
rxn = "rxn"
ie = "ie"
class ReactionEntry(ProtoModel):
"""Data model for the `reactions` list in Dataset"""
attributes: Dict[str, Union[int, float, str]] # Might be overloaded key types
reaction_results: Dict[str, dict]
name: str
stoichiometry: Dict[str, Dict[str, float]]
extras: Dict[str, Any] = {}
class ReactionDataset(Dataset):
"""
The ReactionDataset class for homogeneous computations on many reactions.
Attributes
----------
client : client.FractalClient
A FractalClient connected to a server
data : ReactionDataset.DataModel
A Model representation of the database backbone
df : pd.DataFrame
The underlying dataframe for the Dataset object
rxn_index : pd.Index
The unrolled reaction index for all reactions in the Dataset
"""
def __init__(self, name: str, client: Optional["FractalClient"] = None, ds_type: str = "rxn", **kwargs) -> None:
"""
        Initializer for the ReactionDataset object. If no Portal is supplied, or the database
        name is not present on the server that the Portal is connected to, a blank database
        will be created.
Parameters
----------
name : str
The name of the Dataset
client : client.FractalClient, optional
A FractalClient connected to a server
ds_type : str, optional
The type of Dataset involved
"""
ds_type = ds_type.lower()
super().__init__(name, client=client, ds_type=ds_type, **kwargs)
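    # Hedged construction sketch (the dataset name and client are assumptions):
    #   ds = ReactionDataset("my_reactions", client=client, ds_type="ie")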
class DataModel(Dataset.DataModel):
ds_type: _ReactionTypeEnum = _ReactionTypeEnum.rxn
records: Optional[List[ReactionEntry]] = None
history: Set[Tuple[str, str, str, Optional[str], Optional[str], str]] = set()
history_keys: Tuple[str, str, str, str, str, str] = (
"driver",
"program",
"method",
"basis",
"keywords",
"stoichiometry",
)
    def _entry_index(self, subset: Optional[List[str]] = None) -> pd.DataFrame:
if self.data.records is None:
self._get_data_records_from_db()
# Unroll the index
tmp_index = []
for rxn in self.data.records:
name = rxn.name
for stoich_name in list(rxn.stoichiometry):
for mol_hash, coef in rxn.stoichiometry[stoich_name].items():
tmp_index.append([name, stoich_name, mol_hash, coef])
ret = pd.DataFrame(tmp_index, columns=["name", "stoichiometry", "molecule", "coefficient"])
if subset is None:
return ret
else:
return ret.reset_index().set_index("name").loc[subset].reset_index().set_index("index")
def _molecule_indexer(
self,
stoich: Union[str, List[str]],
subset: Optional[Union[str, Set[str]]] = None,
coefficients: bool = False,
force: bool = False,
) -> Tuple[Dict[Tuple[str, ...], "ObjectId"], Tuple[str]]:
"""Provides a {index: molecule_id} mapping for a given subset.
Parameters
----------
stoich : Union[str, List[str]]
The stoichiometries, or list of stoichiometries to return
subset : Optional[Union[str, Set[str]]], optional
The indices of the desired subset. Return all indices if subset is None.
coefficients : bool, optional
            Returns the coefficients as part of the index if True
No Longer Returned
------------------
Dict[str, 'ObjectId']
Molecule index to molecule ObjectId map
Returns
-------
Tuple[Dict[Tuple[str, ...], 'ObjectId'], Tuple[str]]
Molecule index to molecule ObjectId map, and index names
"""
if isinstance(stoich, str):
stoich = [stoich]
index = self.get_entries(subset=subset, force=force)
matched_rows = index[np.in1d(index["stoichiometry"], stoich)]
if subset:
matched_rows = matched_rows[np.in1d(matched_rows["name"], subset)]
names = ("name", "stoichiometry", "idx")
if coefficients:
names = names + ("coefficient",)
ret = {}
for gb_idx, group in matched_rows.groupby(["name", "stoichiometry"]):
for cnt, (idx, row) in enumerate(group.iterrows()):
if coefficients:
ret[gb_idx + (cnt, row["coefficient"])] = row["molecule"]
else:
ret[gb_idx + (cnt,)] = row["molecule"]
return ret, names
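    # Hedged illustration (entry and stoichiometry names are assumptions): the mapping
    # returned above has keys like ("water_dimer", "cp", 0) -> molecule ObjectId, with a
    # trailing coefficient appended to the key when coefficients=True.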
def valid_stoich(self, subset=None, force: bool = False) -> Set[str]:
entries = self.get_entries(subset=subset, force=force)
return set(entries["stoichiometry"].unique())
def _validate_stoich(self, stoich: Union[List[str], str], subset=None, force: bool = False) -> None:
if isinstance(stoich, str):
stoich = [stoich]
if isinstance(subset, str):
subset = [subset]
valid_stoich = self.valid_stoich(subset=subset, force=force)
for s in stoich:
if s.lower() not in valid_stoich:
raise KeyError("Stoichiometry not understood, valid keys are {}.".format(valid_stoich))
def _pre_save_prep(self, client: "FractalClient") -> None:
self._canonical_pre_save(client)
mol_ret = self._add_molecules_by_dict(client, self._new_molecules)
# Update internal molecule UUID's to servers UUID's
for record in self._new_records:
stoichiometry = replace_dict_keys(record.stoichiometry, mol_ret)
new_record = record.copy(update={"stoichiometry": stoichiometry})
self.data.records.append(new_record)
self._new_records: List[ReactionEntry] = []
self._new_molecules = {}
self._entry_index()
def get_values(
self,
method: Optional[Union[str, List[str]]] = None,
basis: Optional[Union[str, List[str]]] = None,
keywords: Optional[str] = None,
program: Optional[str] = None,
driver: Optional[str] = None,
stoich: str = "default",
name: Optional[Union[str, List[str]]] = None,
native: Optional[bool] = None,
subset: Optional[Union[str, List[str]]] = None,
force: bool = False,
) -> pd.DataFrame:
"""
        Obtains values from the known history for the search parameters provided, for the expected `return_result` values.
Defaults to the standard programs and keywords if not provided.
Note that unlike `get_records`, `get_values` will automatically expand searches and return multiple method
and basis combinations simultaneously.
`None` is a wildcard selector. To search for `None`, use `"None"`.
Parameters
----------
method : Optional[Union[str, List[str]]], optional
The computational method (B3LYP)
basis : Optional[Union[str, List[str]]], optional
The computational basis (6-31G)
keywords : Optional[str], optional
The keyword alias
program : Optional[str], optional
The underlying QC program
driver : Optional[str], optional
The type of calculation (e.g. energy, gradient, hessian, dipole...)
stoich : str, optional
Stoichiometry of the reaction.
name : Optional[Union[str, List[str]]], optional
Canonical name of the record. Overrides the above selectors.
native: Optional[bool], optional
True: only include data computed with QCFractal
False: only include data contributed from outside sources
None: include both
subset: Optional[List[str]], optional
The indices of the desired subset. Return all indices if subset is None.
force : bool, optional
Data is typically cached, forces a new query if True
Returns
        -------
DataFrame
A DataFrame of values with columns corresponding to methods and rows corresponding to reaction entries.
Contributed (native=False) columns are marked with "(contributed)" and may include units in square brackets
if their units differ in dimensionality from the ReactionDataset's default units.
"""
return self._get_values(
method=method,
basis=basis,
keywords=keywords,
program=program,
driver=driver,
stoich=stoich,
name=name,
native=native,
subset=subset,
force=force,
)
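    # Hedged usage sketch (collection, method and basis names are assumptions):
    #   ds = client.get_collection("ReactionDataset", "S22")
    #   df = ds.get_values(method="B3LYP", basis="def2-SVP", stoich="default")
    #   df.head()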
def _get_native_values(
self,
subset: Set[str],
method: Optional[str] = None,
basis: Optional[str] = None,
keywords: Optional[str] = None,
program: Optional[str] = None,
stoich: Optional[str] = None,
name: Optional[str] = None,
force: bool = False,
) -> pd.DataFrame:
self._validate_stoich(stoich, subset=subset, force=force)
# So that datasets with no records do not require a default program and default keywords
if len(self.list_records()) == 0:
return pd.DataFrame(index=self.get_index(subset))
queries = self._form_queries(
method=method, basis=basis, keywords=keywords, program=program, stoich=stoich, name=name
)
if len(queries) == 0:
return pd.DataFrame(index=self.get_index(subset))
stoich_complex = queries.pop("stoichiometry").values[0]
stoich_monomer = "".join([x for x in stoich_complex if not x.isdigit()]) + "1"
        def _query_apply_coefficients(stoich, query):
# Build the starting table
indexer, names = self._molecule_indexer(stoich=stoich, coefficients=True, force=force)
df = self._get_records(indexer, query, include=["return_result"], merge=True)
df.index = pd.MultiIndex.from_tuples(df.index, names=names)
df.reset_index(inplace=True)
            # Block out null values: `groupby.sum()` will return 0 rather than NaN in all cases
null_mask = df[["name", "return_result"]].copy()
null_mask["return_result"] = null_mask["return_result"].isnull()
null_mask = null_mask.groupby(["name"])["return_result"].sum() != False
# Multiply by coefficients and sum
df["return_result"] *= df["coefficient"]
df = df.groupby(["name"])["return_result"].sum()
df[null_mask] = np.nan
return df
names = []
new_queries = []
new_data = pd.DataFrame(index=subset)
for _, query in queries.iterrows():
query = query.replace({np.nan: None}).to_dict()
qname = query["name"]
names.append(qname)
if force or not self._subset_in_cache(qname, subset):
self._column_metadata[qname] = query
new_queries.append(query)
if not self._use_view(force):
units: Dict[str, str] = {}
for query in new_queries:
qname = query.pop("name")
if self.data.ds_type == _ReactionTypeEnum.ie:
# This implements 1-body counterpoise correction
# TODO: this will need to contain the logic for VMFC or other method-of-increments strategies
                    data_complex = _query_apply_coefficients(stoich_complex, query)
                    data_monomer = _query_apply_coefficients(stoich_monomer, query)
data = data_complex - data_monomer
elif self.data.ds_type == _ReactionTypeEnum.rxn:
                    data = _query_apply_coefficients(stoich_complex, query)
else:
raise ValueError(
f"ReactionDataset ds_type is not a member of _ReactionTypeEnum. (Got {self.data.ds_type}.)"
)
new_data[qname] = data * constants.conversion_factor("hartree", self.units)
query["name"] = qname
units[qname] = self.units
else:
for query in new_queries:
query["native"] = True
new_data, units = self._view.get_values(new_queries)
for query in new_queries:
qname = query["name"]
new_data[qname] = new_data[qname] * constants.conversion_factor(units[qname], self.units)
for query in new_queries:
qname = query["name"]
self._column_metadata[qname].update({"native": True, "units": units[qname]})
self._update_cache(new_data)
return self.df.loc[subset, names]
def visualize(
self,
method: Optional[str] = None,
basis: Optional[str] = None,
keywords: Optional[str] = None,
program: Optional[str] = None,
stoich: str = "default",
groupby: Optional[str] = None,
metric: str = "UE",
bench: Optional[str] = None,
kind: str = "bar",
return_figure: Optional[bool] = None,
show_incomplete: bool = False,
) -> "plotly.Figure":
"""
Parameters
----------
method : Optional[str], optional
Methods to query
basis : Optional[str], optional
Bases to query
keywords : Optional[str], optional
Keyword aliases to query
program : Optional[str], optional
Programs aliases to query
stoich : str, optional
Stoichiometry to query
groupby : Optional[str], optional
Groups the plot by this index.
metric : str, optional
The metric to use either UE (unsigned error) or URE (unsigned relative error)
bench : Optional[str], optional
The benchmark level of theory to use
kind : str, optional
The kind of chart to produce, either 'bar' or 'violin'
return_figure : Optional[bool], optional
If True, return the raw plotly figure. If False, returns a hosted iPlot. If None, return a iPlot display in Jupyter notebook and a raw plotly figure in all other circumstances.
        show_incomplete : bool, optional
Display statistics method/basis set combinations where results are incomplete
Returns
-------
plotly.Figure
The requested figure.
"""
query = {"method": method, "basis": basis, "keywords": keywords, "program": program, "stoichiometry": stoich}
query = {k: v for k, v in query.items() if v is not None}
return self._visualize(
metric,
bench,
query=query,
groupby=groupby,
return_figure=return_figure,
kind=kind,
show_incomplete=show_incomplete,
)
def get_molecules(
self,
subset: Optional[Union[str, Set[str]]] = None,
stoich: Union[str, List[str]] = "default",
force: bool = False,
) -> pd.DataFrame:
"""Queries full Molecules from the database.
Parameters
----------
subset : Optional[Union[str, Set[str]]], optional
The index subset to query on
stoich : Union[str, List[str]], optional
The stoichiometries to pull from, either a single or multiple stoichiometries
force : bool, optional
Force pull of molecules from server
Returns
-------
pd.DataFrame
Indexed Molecules which match the stoich and subset string.
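Examples
--------
A hypothetical query, assuming ``ds`` is a ReactionDataset; the entry name is
illustrative only:
>>> ds.get_molecules(subset="Water dimer", stoich="default")  # doctest: +SKIP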
"""
self._check_client()
self._check_state()
if isinstance(subset, str):
subset = [subset]
self._validate_stoich(stoich, subset=subset, force=force)
indexer, names = self._molecule_indexer(stoich=stoich, subset=subset, force=force)
df = self._get_molecules(indexer, force=force)
df.index = pd.MultiIndex.from_tuples(df.index, names=names)
return df
def get_records(
self,
method: str,
basis: Optional[str] = None,
*,
keywords: Optional[str] = None,
program: Optional[str] = None,
stoich: Union[str, List[str]] = "default",
include: Optional[List[str]] = None,
subset: Optional[Union[str, Set[str]]] = None,
) -> Union[pd.DataFrame, "ResultRecord"]:
"""
Queries the local Portal for the requested keys and stoichiometry.
Parameters
----------
method : str
The computational method to query on (B3LYP)
basis : Optional[str], optional
The computational basis to query on (6-31G)
keywords : Optional[str], optional
The keyword set (option token) to query on
program : Optional[str], optional
The program to query on
stoich : Union[str, List[str]], optional
The stoichiometry or stoichiometries to query.
include : Optional[List[str]], optional
The record attributes to project out of the query; if None, full ResultRecord objects are returned.
subset : Optional[Union[str, Set[str]]], optional
The index subset to query on
Returns
-------
Union[pd.DataFrame, 'ResultRecord']
The queried records, either as a DataFrame of projected attributes or as ResultRecord objects.
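Examples
--------
A hypothetical projection query, assuming ``ds`` is a ReactionDataset; the
method, basis, and column names are illustrative only:
>>> ds.get_records("B3LYP", "6-31G", include=["return_result"])  # doctest: +SKIP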
"""
self._check_client()
self._check_state()
method = method.upper()
if isinstance(stoich, str):
stoich = [stoich]
ret = []
for s in stoich:
name, _, history = self._default_parameters(program, method, basis, keywords, stoich=s)
history.pop("stoichiometry")
indexer, names = self._molecule_indexer(stoich=s, subset=subset, force=True)
df = self._get_records(
indexer,
history,
include=include,
merge=False,
raise_on_plan="`get_records` can only be used for non-composite quantities. You likely queried a DFT+D method or similar that requires a combination of DFT and -D. Please query each piece separately.",
)
df = df[0]
df.index = pd.MultiIndex.from_tuples(df.index, names=names)
ret.append(df)
ret = pd.concat(ret)
ret.sort_index(inplace=True)
return ret
def compute(
self,
method: str,
basis: Optional[str] = None,
*,
keywords: Optional[str] = None,
program: Optional[str] = None,
stoich: str = "default",
ignore_ds_type: bool = False,
tag: Optional[str] = None,
priority: Optional[str] = None,
) -> "ComputeResponse":
"""Executes a computational method for all reactions in the Dataset.
Previously completed computations are not repeated.
Parameters
----------
method : str
The computational method to compute (B3LYP)
basis : Optional[str], optional
The computational basis to compute (6-31G)
keywords : Optional[str], optional
The keyword alias for the requested compute
program : Optional[str], optional
The underlying QC program
stoich : str, optional
The stoichiometry of the requested compute (cp/nocp/etc)
ignore_ds_type : bool, optional
If True, only compute the requested stoichiometry, skipping the monomer geometries normally added for interaction-energy datasets
tag : Optional[str], optional
The queue tag to use when submitting compute requests.
priority : Optional[str], optional
The priority of the jobs: low, medium, or high.
Returns
-------
ComputeResponse
An object that contains the submitted ObjectIds of the new compute. This object has the following fields:
- ids: The ObjectId's of the task in the order of input molecules
- submitted: A list of ObjectId's that were submitted to the compute queue
- existing: A list of ObjectId's of tasks already in the database
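Examples
--------
A hypothetical submission, assuming ``ds`` is a ReactionDataset connected to a
client; the program and tag values are illustrative only:
>>> ds.compute("B3LYP", "6-31G", program="psi4", tag="quick")  # doctest: +SKIP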
"""
self._check_client()
self._check_state()
entry_index = self.get_entries(force=True)
self._validate_stoich(stoich, subset=None, force=True)
compute_keys = {"program": program, "method": method, "basis": basis, "keywords": keywords, "stoich": stoich}
# Figure out molecules that we need
if (not ignore_ds_type) and (self.data.ds_type.lower() == "ie"):
monomer_stoich = "".join([x for x in stoich if not x.isdigit()]) + "1"
tmp_monomer = entry_index[entry_index["stoichiometry"] == monomer_stoich].copy()
ret1 = self._compute(compute_keys, tmp_monomer["molecule"], tag, priority)
tmp_complex = entry_index[entry_index["stoichiometry"] == stoich].copy()
ret2 = self._compute(compute_keys, tmp_complex["molecule"], tag, priority)
ret = ret1.merge(ret2)
else:
tmp_complex = entry_index[entry_index["stoichiometry"] == stoich].copy()
ret = self._compute(compute_keys, tmp_complex["molecule"], tag, priority)
# Update the record that this was computed
self.save()
return ret
def get_rxn(self, name: str) -> ReactionEntry:
"""
Returns the JSON object of a specific reaction.
Parameters
----------
name : str
The name of the reaction to query
Returns
-------
ReactionEntry
The requested reaction entry.
"""
found = []
for num, x in enumerate(self.data.records):
if x.name == name:
found.append(num)
if len(found) == 0:
raise KeyError("Dataset:get_rxn: Reaction name '{}' not found.".format(name))
if len(found) > 1:
raise KeyError("Dataset:get_rxn: Multiple reactions of name '{}' found. Dataset failure.".format(name))
return self.data.records[found[0]]
# Visualization
def ternary(self, cvals=None):
"""Plots a ternary diagram of the DataBase if available
Parameters
----------
cvals : None, optional
Description
"""
raise Exception("MPL not avail")
# return visualization.Ternary2D(self.df, cvals=cvals)
# Adders
def parse_stoichiometry(self, stoichiometry: List[Tuple[Union[Molecule, str], float]]) -> Dict[str, float]:
"""
Parses a stoichiometry list.
Parameters
----------
stoichiometry : list
A list of tuples describing the stoichiometry.
Returns
-------
Dict[str, float]
A dictionary describing the stoichiometry for use in the database.
Keys are molecule hashes. Values are stoichiometric coefficients.
Notes
-----
This function attempts to convert the molecule into its corresponding hash. The following will happen depending on the form of the Molecule.
- Molecule hash - Used directly in the stoichiometry.
- Molecule class - Hash is obtained and the molecule will be added to the database upon saving.
- Molecule string - Molecule will be converted to a Molecule class and the same process as the above will occur.
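Examples
--------
A hypothetical call, assuming ``water_dimer`` and ``water`` are Molecule
instances; the names and coefficients are illustrative only:
>>> ds.parse_stoichiometry([(water_dimer, 1.0), (water, -2.0)])  # doctest: +SKIP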
"""
mol_hashes = []
mol_values = []
for line in stoichiometry:
if len(line) != 2:
raise KeyError("Dataset: Parse stoichiometry: passed in as a list, must be of key : value type")
# Get the values
try:
mol_values.append(float(line[1]))
except:
raise TypeError("Dataset: Parse stoichiometry: must be able to cast second value to a float.")
# What kind of molecule is it?
mol = line[0]
if isinstance(mol, str) and (len(mol) == 40):
molecule_hash = mol
elif isinstance(mol, str):
qcf_mol = Molecule.from_data(mol)
molecule_hash = qcf_mol.get_hash()
if molecule_hash not in list(self._new_molecules):
self._new_molecules[molecule_hash] = qcf_mol
elif isinstance(mol, Molecule):
molecule_hash = mol.get_hash()
if molecule_hash not in list(self._new_molecules):
self._new_molecules[molecule_hash] = mol
else:
raise TypeError(
"Dataset: Parse stoichiometry: first value must either be a molecule hash, "
"a molecule str, or a Molecule class."
)
mol_hashes.append(molecule_hash)
# Sum together the coefficients of duplicates
ret: Dict[str, float] = {}
for mol, coef in zip(mol_hashes, mol_values):
if mol in list(ret):
ret[mol] += coef
else:
ret[mol] = coef
return ret
def add_rxn(
self,
name: str,
stoichiometry: Dict[str, List[Tuple[Molecule, float]]],
reaction_results: Optional[Dict[str, str]] = None,
attributes: Optional[Dict[str, Union[int, float, str]]] = None,
other_fields: Optional[Dict[str, Any]] = None,
) -> ReactionEntry:
"""
Adds a reaction to a database object.
Parameters
----------
name : str
Name of the reaction.
stoichiometry : list or dict
Either a list of (molecule, coefficient) tuples or a dictionary of such lists keyed by stoichiometry name; a dictionary must contain a 'default' key.
reaction_results : dict or None, Optional, Default: None
A dictionary of the computed total interaction energy results
attributes : dict or None, Optional, Default: None
A dictionary of attributes to assign to the reaction
other_fields : dict or None, Optional, Default: None
A dictionary of additional user defined fields to add to the reaction entry
Returns
-------
ReactionEntry
A complete specification of the reaction
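Examples
--------
A hypothetical reaction built from Molecule instances, assuming ``ds`` is a
ReactionDataset; all names and coefficients are illustrative only:
>>> ds.add_rxn("He dimer", {"default": [(he_dimer, 1.0), (he_atom, -2.0)]})  # doctest: +SKIP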
"""
if reaction_results is None:
reaction_results = {}
if attributes is None:
attributes = {}
if other_fields is None:
other_fields = {}
rxn_dict: Dict[str, Any] = {"name": name}
# Set name
if name in self.get_index():
raise KeyError(
"Dataset: Name '{}' already exists. "
"Please either delete this entry or call the update function.".format(name)
)
# Set stoich
if isinstance(stoichiometry, dict):
rxn_dict["stoichiometry"] = {}
if "default" not in list(stoichiometry):
raise KeyError("Dataset:add_rxn: Stoichiometry dict must have a 'default' key.")
for k, v in stoichiometry.items():
rxn_dict["stoichiometry"][k] = self.parse_stoichiometry(v)
elif isinstance(stoichiometry, (tuple, list)):
rxn_dict["stoichiometry"] = {}
rxn_dict["stoichiometry"]["default"] = self.parse_stoichiometry(stoichiometry)
else:
raise TypeError("Dataset:add_rxn: Type of stoichiometry input was not recognized:", type(stoichiometry))
# Set attributes
if not isinstance(attributes, dict):
raise TypeError("Dataset:add_rxn: attributes must be a dictionary, not '{}'".format(type(attributes)))
rxn_dict["attributes"] = attributes
if not isinstance(other_fields, dict):
raise TypeError("Dataset:add_rxn: other_fields must be a dictionary, not '{}'".format(type(attributes)))
rxn_dict["extras"] = other_fields
if "default" in list(reaction_results):
rxn_dict["reaction_results"] = reaction_results
elif isinstance(reaction_results, dict):
rxn_dict["reaction_results"] = {}
rxn_dict["reaction_results"]["default"] = reaction_results
else:
raise TypeError("Passed in reaction_results not understood.")
rxn = ReactionEntry(**rxn_dict)
self._new_records.append(rxn)
return rxn
def add_ie_rxn(self, name: str, mol: Molecule, **kwargs) -> ReactionEntry:
"""Add a interaction energy reaction entry to the database. Automatically
builds CP and no-CP reactions for the fragmented molecule.
Parameters
----------
name : str
The name of the reaction
mol : Molecule
A molecule with multiple fragments
**kwargs
Additional kwargs to pass into `build_ie_fragments`.
Returns
-------
ReactionEntry
A representation of the new reaction.
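Examples
--------
A hypothetical addition, assuming ``ds`` is a ReactionDataset and
``water_dimer`` is a multi-fragment Molecule; the name is illustrative only:
>>> ds.add_ie_rxn("Water dimer", water_dimer)  # doctest: +SKIP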
"""
reaction_results = kwargs.pop("reaction_results", {})
attributes = kwargs.pop("attributes", {})
other_fields = kwargs.pop("other_fields", {})
stoichiometry = self.build_ie_fragments(mol, name=name, **kwargs)
return self.add_rxn(
name, stoichiometry, reaction_results=reaction_results, attributes=attributes, other_fields=other_fields
)
@staticmethod
def build_ie_fragments(mol: Molecule, **kwargs) -> Dict[str, List[Tuple[Molecule, float]]]:
"""
Build the stoichiometry for an Interaction Energy.
Parameters
----------
mol : Molecule class or str
Molecule to fragment.
do_default : bool
Create the default (noCP) stoichiometry.
do_cp : bool
Create the counterpoise (CP) corrected stoichiometry.
do_vmfc : bool
Create the Valiron-Mayer Function Counterpoise (VMFC) corrected stoichiometry.
max_nbody : int
The maximum fragment level built; if zero, defaults to the number of fragments.
Returns
-------
ret : dict
A JSON representation of the fragmented molecule.
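Examples
--------
A hypothetical call on a two-fragment Molecule, requesting only the
counterpoise stoichiometries; ``water_dimer`` is illustrative only:
>>> ds.build_ie_fragments(water_dimer, do_default=False, do_cp=True)  # doctest: +SKIP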
"""
do_default = kwargs.pop("do_default", True)
do_cp = kwargs.pop("do_cp", True)
do_vmfc = kwargs.pop("do_vmfc", False)
max_nbody = kwargs.pop("max_nbody", 0)
if not isinstance(mol, Molecule):
mol = Molecule.from_data(mol, **kwargs)
ret = {}
max_frag = len(mol.fragments)
if max_nbody == 0:
max_nbody = max_frag
if max_frag < 2:
raise AttributeError("Dataset:build_ie_fragments: Molecule must have at least two fragments.")
# Build some info
fragment_range = list(range(max_frag))
# Loop over the bodies
for nbody in range(1, max_nbody):
nocp_tmp = []
cp_tmp = []
for k in range(1, nbody + 1):
take_nk = nCr(max_frag - k - 1, nbody - k)
sign = (-1) ** (nbody - k)
coef = take_nk * sign
for frag in it.combinations(fragment_range, k):
if do_default:
nocp_tmp.append((mol.get_fragment(frag, orient=True, group_fragments=True), coef))
if do_cp:
ghost = list(set(fragment_range) - set(frag))
cp_tmp.append((mol.get_fragment(frag, ghost, orient=True, group_fragments=True), coef))
if do_default:
ret["default" + str(nbody)] = nocp_tmp
if do_cp:
ret["cp" + str(nbody)] = cp_tmp
# VMFC is a special beast
if do_vmfc:
raise KeyError("VMFC isnt quite ready for primetime!")
# ret.update({"vmfc" + str(nbody): [] for nbody in range(1, max_nbody)})
# nbody_range = list(range(1, max_nbody))
# for nbody in nbody_range:
# for cp_combos in it.combinations(fragment_range, nbody):
# basis_tuple = tuple(cp_combos)
# for interior_nbody in nbody_range:
# for x in it.combinations(cp_combos, interior_nbody):
# ghost = list(set(basis_tuple) - set(x))
# ret["vmfc" + str(interior_nbody)].append((mol.get_fragment(x, ghost), 1.0))
# Add in the maximal position
if do_default:
ret["default"] = [(mol, 1.0)]
if do_cp:
ret["cp"] = [(mol, 1.0)]
# if do_vmfc:
# ret["vmfc"] = [(mol, 1.0)]
return ret
register_collection(ReactionDataset)
| bsd-3-clause |
sahat/bokeh | bokeh/protocol.py | 1 | 3541 | import json
import logging
import time
import datetime as dt
import numpy as np
from six.moves import cPickle as pickle
from .utils import get_ref
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
log = logging.getLogger(__name__)
millifactor = 10 ** 6.
class BokehJSONEncoder(json.JSONEncoder):
def transform_series(self, obj):
"""transform series
"""
vals = obj.values
return self.transform_array(vals)
def transform_array(self, obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
## not quite correct, truncates to ms..
if obj.dtype.kind == 'M':
return obj.astype('datetime64[ms]').astype('int64').tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return self.transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(self, obj):
"""handles nans/inf conversion
"""
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
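# Illustrative mapping (a sketch based on the method above, not from the original source):
# np.array([1.0, np.nan, np.inf, -np.inf]) is emitted as [1.0, 'NaN', 'Infinity', '-Infinity'].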
def transform_python_types(self, obj):
"""handle special scalars, default to default json encoder
"""
if is_pandas and isinstance(obj, pd.tslib.Timestamp):
return obj.value / millifactor
elif isinstance(obj, np.float):
return float(obj)
elif isinstance(obj, np.int):
return int(obj)
elif isinstance(obj, (dt.datetime, dt.date)):
return time.mktime(obj.timetuple()) * 1000.
elif isinstance(obj, dt.time):
return (obj.hour*3600 + obj.minute*60 + obj.second)*1000 + obj.microsecond
elif is_dateutil and isinstance(obj, relativedelta):
return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
#argh! local import!
from .plot_object import PlotObject
from .properties import HasProps
from .colors import Color
## array types
if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
return self.transform_series(obj)
elif isinstance(obj, np.ndarray):
return self.transform_array(obj)
elif isinstance(obj, PlotObject):
return get_ref(obj)
elif isinstance(obj, HasProps):
return obj.to_dict()
elif isinstance(obj, Color):
return obj.toCSS()
else:
return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
return json.dumps(obj, cls=encoder, **kwargs)
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
return {'msgtype': 'status',
'status': status}
def error_obj(error_msg):
return {
'msgtype': 'error',
'error_msg': error_msg}
| bsd-3-clause |
almarklein/scikit-image | doc/examples/plot_rank_mean.py | 3 | 1498 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element are used to
  compute the average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
  (here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element whose gray
  level lies between g-s0 and g+s1 (here g-500 and g+500).
The percentile and usual means give similar results here; both filters smooth
the complete image (background and details). The bilateral mean exhibits a
high filtering rate for continuous areas (i.e. background), while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filter import rank
image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes
ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title('Percentile mean')
ax0.axis('off')
ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title('Bilateral mean')
ax1.axis('off')
ax2.imshow(np.hstack((image, normal_result)))
ax2.set_title('Local mean')
ax2.axis('off')
plt.show()
| bsd-3-clause |
Vimos/scikit-learn | sklearn/metrics/tests/test_ranking.py | 46 | 41270 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at
# 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
# This test was expanded (added scaled_down) in response to github
# issue #3864 (and others), where overly aggressive rounding was causing
# problems for users with very small y_score values
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled_up)
assert_equal(roc_auc, roc_auc_scaled_down)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled_up)
assert_equal(pr_auc, pr_auc_scaled_down)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for growing number of consecutive relevant
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various inputs.
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for growing number of consecutive relevant label
for n_relevant in range(1, n_labels):
# Check for a bunch of position
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account;
# e.g. two labels tied at rank 1 both receive rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a better rank
# (i.e. a smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/indexes/datetimes/test_tools.py | 1 | 77777 | """ test to_datetime """
import calendar
from datetime import datetime, time
from distutils.version import LooseVersion
import locale
import dateutil
from dateutil.parser import parse
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import tslib
from pandas._libs.tslibs import iNaT, parsing
from pandas.compat import PY3, lmap
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_datetime64_ns_dtype
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, compat,
date_range, isna, to_datetime)
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
from pandas.util import testing as tm
from pandas.util.testing import assert_series_equal
class TestTimeConversionFormats(object):
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_format(self, cache):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
results1 = [Timestamp('20000101'), Timestamp('20000201'),
Timestamp('20000301')]
results2 = [Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')]
for vals, expecteds in [(values, (Index(results1), Index(results2))),
(Series(values),
(Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2]))]:
for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
result = to_datetime(vals, format=fmt, cache=cache)
expected = expecteds[i]
if isinstance(expected, Series):
assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
assert result == expected
else:
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_format_YYYYMMDD(self, cache):
s = Series([19801222, 19801222] + [19810105] * 5)
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format='%Y%m%d', cache=cache)
assert_series_equal(result, expected)
result = to_datetime(s.apply(str), format='%Y%m%d', cache=cache)
assert_series_equal(result, expected)
# with NaT
expected = Series([Timestamp("19801222"), Timestamp("19801222")] +
[Timestamp("19810105")] * 5)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s, format='%Y%m%d', cache=cache)
assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = 'nat'
result = to_datetime(s, format='%Y%m%d', cache=cache)
assert_series_equal(result, expected)
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
result = pd.to_datetime(s, format='%Y%m%d', errors='ignore',
cache=cache)
expected = Series([datetime(2012, 12, 31),
datetime(2014, 12, 31), datetime(9999, 12, 31)],
dtype=object)
tm.assert_series_equal(result, expected)
result = pd.to_datetime(s, format='%Y%m%d', errors='coerce',
cache=cache)
expected = Series(['20121231', '20141231', 'NaT'], dtype='M8[ns]')
assert_series_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_format_integer(self, cache):
# GH 10178
s = Series([2000, 2001, 2002])
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format='%Y', cache=cache)
assert_series_equal(result, expected)
s = Series([200001, 200105, 200206])
expected = Series([Timestamp(x[:4] + '-' + x[4:]) for x in s.apply(str)
])
result = to_datetime(s, format='%Y%m', cache=cache)
assert_series_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_format_microsecond(self, cache):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
val = '01-{}-2011 00:00:01.978'.format(month_abbr)
format = '%d-%b-%Y %H:%M:%S.%f'
result = to_datetime(val, format=format, cache=cache)
exp = datetime.strptime(val, format)
assert result == exp
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_format_time(self, cache):
data = [
['01/10/2010 15:20', '%m/%d/%Y %H:%M',
Timestamp('2010-01-10 15:20')],
['01/10/2010 05:43', '%m/%d/%Y %I:%M',
Timestamp('2010-01-10 05:43')],
['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S',
Timestamp('2010-01-10 13:56:01')] # ,
# ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 20:14')],
# ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 07:40')],
# ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
# Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@td.skip_if_has_locale
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
# exact kw
s = Series(['19MAY11', 'foobar19MAY11', '19MAY11:00:00:00',
'19MAY11 00:00:00Z'])
result = to_datetime(s, format='%d%b%y', exact=False, cache=cache)
expected = to_datetime(s.str.extract(r'(\d+\w+\d+)', expand=False),
format='%d%b%y', cache=cache)
assert_series_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_parse_nanoseconds_with_formula(self, cache):
# GH8989
        # truncating the nanoseconds when a format was provided
for v in ["2012-01-01 09:00:00.000000001",
"2012-01-01 09:00:00.000001",
"2012-01-01 09:00:00.001",
"2012-01-01 09:00:00.001000",
"2012-01-01 09:00:00.001000000", ]:
expected = pd.to_datetime(v, cache=cache)
result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f",
cache=cache)
assert result == expected
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_format_weeks(self, cache):
data = [
['2009324', '%Y%W%w', Timestamp('2009-08-13')],
['2013020', '%Y%U%w', Timestamp('2013-01-13')]
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@pytest.mark.parametrize("box,const", [
[True, pd.Index],
[False, np.array]])
@pytest.mark.parametrize("fmt,dates,expected_dates", [
['%Y-%m-%d %H:%M:%S %Z',
['2010-01-01 12:00:00 UTC'] * 2,
[pd.Timestamp('2010-01-01 12:00:00', tz='UTC')] * 2],
['%Y-%m-%d %H:%M:%S %Z',
['2010-01-01 12:00:00 UTC',
'2010-01-01 12:00:00 GMT',
'2010-01-01 12:00:00 US/Pacific'],
[pd.Timestamp('2010-01-01 12:00:00', tz='UTC'),
pd.Timestamp('2010-01-01 12:00:00', tz='GMT'),
pd.Timestamp('2010-01-01 12:00:00', tz='US/Pacific')]],
['%Y-%m-%d %H:%M:%S%z',
['2010-01-01 12:00:00+0100'] * 2,
[pd.Timestamp('2010-01-01 12:00:00',
tzinfo=pytz.FixedOffset(60))] * 2],
['%Y-%m-%d %H:%M:%S %z',
['2010-01-01 12:00:00 +0100'] * 2,
[pd.Timestamp('2010-01-01 12:00:00',
tzinfo=pytz.FixedOffset(60))] * 2],
['%Y-%m-%d %H:%M:%S %z',
['2010-01-01 12:00:00 +0100', '2010-01-01 12:00:00 -0100'],
[pd.Timestamp('2010-01-01 12:00:00',
tzinfo=pytz.FixedOffset(60)),
pd.Timestamp('2010-01-01 12:00:00',
tzinfo=pytz.FixedOffset(-60))]],
['%Y-%m-%d %H:%M:%S %z',
['2010-01-01 12:00:00 Z', '2010-01-01 12:00:00 Z'],
[pd.Timestamp('2010-01-01 12:00:00',
tzinfo=pytz.FixedOffset(0)), # pytz coerces to UTC
pd.Timestamp('2010-01-01 12:00:00',
tzinfo=pytz.FixedOffset(0))]]])
def test_to_datetime_parse_tzname_or_tzoffset(self, box, const,
fmt, dates, expected_dates):
# GH 13486
result = pd.to_datetime(dates, format=fmt, box=box)
expected = const(expected_dates)
tm.assert_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(dates, format=fmt, box=box, utc=True)
@pytest.mark.parametrize('offset', [
'+0', '-1foo', 'UTCbar', ':10', '+01:000:01', ''])
def test_to_datetime_parse_timezone_malformed(self, offset):
fmt = '%Y-%m-%d %H:%M:%S %z'
date = '2010-01-01 12:00:00 ' + offset
with pytest.raises(ValueError):
pd.to_datetime([date], format=fmt)
def test_to_datetime_parse_timezone_keeps_name(self):
# GH 21697
fmt = '%Y-%m-%d %H:%M:%S %z'
arg = pd.Index(['2010-01-01 12:00:00 Z'], name='foo')
result = pd.to_datetime(arg, format=fmt)
expected = pd.DatetimeIndex(['2010-01-01 12:00:00'], tz='UTC',
name='foo')
tm.assert_index_equal(result, expected)
class TestToDatetime(object):
@pytest.mark.parametrize('tz', [None, 'US/Central'])
def test_to_datetime_dtarr(self, tz):
# DatetimeArray
dti = date_range('1965-04-03', periods=19, freq='2W', tz=tz)
arr = DatetimeArray(dti)
result = to_datetime(arr)
assert result is arr
result = to_datetime(arr, box=True)
assert result is arr
def test_to_datetime_pydatetime(self):
actual = pd.to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
def test_to_datetime_YYYYMMDD(self):
actual = pd.to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
def test_to_datetime_unparseable_ignore(self):
# unparseable
s = 'Month 1, 1999'
assert pd.to_datetime(s, errors='ignore') == s
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_now(self):
# See GH#18666
with tm.set_timezone('US/Eastern'):
npnow = np.datetime64('now').astype('datetime64[ns]')
pdnow = pd.to_datetime('now')
pdnow2 = pd.to_datetime(['now'])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10
assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10
assert pdnow.tzinfo is None
assert pdnow2.tzinfo is None
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_today(self):
# See GH#18666
# Test with one timezone far ahead of UTC and another far behind, so
        # one of these will _almost_ always be in a different day from UTC.
        # Unfortunately, if this test runs between 12 and 1 AM Samoa time,
        # then both of these timezones _and_ UTC will all be in the same day,
# so this test will not detect the regression introduced in #18666.
with tm.set_timezone('Pacific/Auckland'): # 12-13 hours ahead of UTC
nptoday = np.datetime64('today')\
.astype('datetime64[ns]').astype(np.int64)
pdtoday = pd.to_datetime('today')
pdtoday2 = pd.to_datetime(['today'])[0]
tstoday = pd.Timestamp('today')
tstoday2 = pd.Timestamp.today()
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert abs(pdtoday.value - tstoday.value) < 1e10
assert abs(pdtoday.value - tstoday2.value) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
with tm.set_timezone('US/Samoa'): # 11 hours behind UTC
nptoday = np.datetime64('today')\
.astype('datetime64[ns]').astype(np.int64)
pdtoday = pd.to_datetime('today')
pdtoday2 = pd.to_datetime(['today'])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
def test_to_datetime_today_now_unicode_bytes(self):
to_datetime([u'now'])
to_datetime([u'today'])
if not PY3:
to_datetime(['now'])
to_datetime(['today'])
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_dt64s(self, cache):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
assert pd.to_datetime(dt, cache=cache) == Timestamp(dt)
@pytest.mark.parametrize('dt', [np.datetime64('1000-01-01'),
np.datetime64('5000-01-02')])
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
msg = "Out of bounds nanosecond timestamp: {}".format(dt)
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt, errors='raise')
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp(dt)
assert pd.to_datetime(dt, errors='coerce', cache=cache) is NaT
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_array_of_dt64s(self, cache):
dts = [np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_numpy_array_equal(
pd.to_datetime(dts, box=False, cache=cache),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
msg = "Out of bounds nanosecond timestamp: 9999-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dts_with_oob, errors='raise')
tm.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, errors='coerce',
cache=cache),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
tslib.iNaT,
],
dtype='M8'
)
)
# With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
tm.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, errors='ignore',
cache=cache),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_tz(self, cache):
# xref 8260
# uniform returns a DatetimeIndex
arr = [pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]
result = pd.to_datetime(arr, cache=cache)
expected = DatetimeIndex(
['2013-01-01 13:00:00', '2013-01-02 14:00:00'], tz='US/Pacific')
tm.assert_index_equal(result, expected)
# mixed tzs will raise
arr = [pd.Timestamp('2013-01-01 13:00:00', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00', tz='US/Eastern')]
msg = ("Tz-aware datetime.datetime cannot be converted to datetime64"
" unless utc=True")
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, cache=cache)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_tz_pytz(self, cache):
# see gh-8260
us_eastern = pytz.timezone('US/Eastern')
arr = np.array([us_eastern.localize(datetime(year=2000, month=1, day=1,
hour=3, minute=0)),
us_eastern.localize(datetime(year=2000, month=6, day=1,
hour=3, minute=0))],
dtype=object)
result = pd.to_datetime(arr, utc=True, cache=cache)
expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
'2000-06-01 07:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
@pytest.mark.parametrize("init_constructor, end_constructor, test_method",
[(Index, DatetimeIndex, tm.assert_index_equal),
(list, DatetimeIndex, tm.assert_index_equal),
(np.array, DatetimeIndex, tm.assert_index_equal),
(Series, Series, tm.assert_series_equal)])
def test_to_datetime_utc_true(self,
cache,
init_constructor,
end_constructor,
test_method):
# See gh-11934 & gh-6415
data = ['20100102 121314', '20100102 121315']
expected_data = [pd.Timestamp('2010-01-02 12:13:14', tz='utc'),
pd.Timestamp('2010-01-02 12:13:15', tz='utc')]
result = pd.to_datetime(init_constructor(data),
format='%Y%m%d %H%M%S',
utc=True,
cache=cache)
expected = end_constructor(expected_data)
test_method(result, expected)
# Test scalar case as well
for scalar, expected in zip(data, expected_data):
result = pd.to_datetime(scalar, format='%Y%m%d %H%M%S', utc=True,
cache=cache)
assert result == expected
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_utc_true_with_series_single_value(self, cache):
# GH 15760 UTC=True with Series
ts = 1.5e18
result = pd.to_datetime(pd.Series([ts]), utc=True, cache=cache)
expected = pd.Series([pd.Timestamp(ts, tz='utc')])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
ts = '2013-01-01 00:00:00-01:00'
expected_ts = '2013-01-01 01:00:00'
data = pd.Series([ts] * 3)
result = pd.to_datetime(data, utc=True, cache=cache)
expected = pd.Series([pd.Timestamp(expected_ts, tz='utc')] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
@pytest.mark.parametrize('date, dtype',
[('2013-01-01 01:00:00', 'datetime64[ns]'),
('2013-01-01 01:00:00', 'datetime64[ns, UTC]')])
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date,
dtype):
expected = pd.Series([pd.Timestamp('2013-01-01 01:00:00', tz='UTC')])
result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True,
cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_tz_psycopg2(self, cache):
# xref 8260
try:
import psycopg2
except ImportError:
pytest.skip("no psycopg2 installed")
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
arr = np.array([datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
datetime(2000, 6, 1, 3, 0, tzinfo=tz2)],
dtype=object)
result = pd.to_datetime(arr, errors='coerce', utc=True, cache=cache)
expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
'2000-06-01 07:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
tm.assert_index_equal(result, expected)
# dtype coercion
i = pd.DatetimeIndex([
'2000-01-01 08:00:00'
], tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None))
assert is_datetime64_ns_dtype(i)
        # tz coercion
result = pd.to_datetime(i, errors='coerce', cache=cache)
tm.assert_index_equal(result, i)
result = pd.to_datetime(i, errors='coerce', utc=True, cache=cache)
expected = pd.DatetimeIndex(['2000-01-01 13:00:00'],
dtype='datetime64[ns, UTC]')
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
'cache',
[pytest.param(True,
marks=pytest.mark.skipif(True, reason="GH 18111")),
False])
def test_datetime_bool(self, cache):
# GH13176
with pytest.raises(TypeError):
to_datetime(False)
assert to_datetime(False, errors="coerce", cache=cache) is NaT
assert to_datetime(False, errors="ignore", cache=cache) is False
with pytest.raises(TypeError):
to_datetime(True)
assert to_datetime(True, errors="coerce", cache=cache) is NaT
assert to_datetime(True, errors="ignore", cache=cache) is True
with pytest.raises(TypeError):
to_datetime([False, datetime.today()], cache=cache)
with pytest.raises(TypeError):
to_datetime(['20130101', True], cache=cache)
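        # with errors="coerce", False and NaT coerce to NaT, while 0 and 0.0
        # convert to the epoch (1970-01-01)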
tm.assert_index_equal(to_datetime([0, False, NaT, 0.0],
errors="coerce", cache=cache),
DatetimeIndex([to_datetime(0, cache=cache),
NaT,
NaT,
to_datetime(0, cache=cache)]))
def test_datetime_invalid_datatype(self):
# GH13176
with pytest.raises(TypeError):
pd.to_datetime(bool)
with pytest.raises(TypeError):
pd.to_datetime(pd.to_datetime)
@pytest.mark.parametrize('value', ["a", "00:01:99"])
@pytest.mark.parametrize('infer', [True, False])
@pytest.mark.parametrize('format', [None, 'H%:M%:S%'])
def test_datetime_invalid_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(value, errors='ignore', format=format,
infer_datetime_format=infer)
assert res == value
res = pd.to_datetime(value, errors='coerce', format=format,
infer_datetime_format=infer)
assert res is pd.NaT
with pytest.raises(ValueError):
pd.to_datetime(value, errors='raise', format=format,
infer_datetime_format=infer)
@pytest.mark.parametrize('value', ["3000/12/11 00:00:00"])
@pytest.mark.parametrize('infer', [True, False])
@pytest.mark.parametrize('format', [None, 'H%:M%:S%'])
def test_datetime_outofbounds_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(value, errors='ignore', format=format,
infer_datetime_format=infer)
assert res == value
res = pd.to_datetime(value, errors='coerce', format=format,
infer_datetime_format=infer)
assert res is pd.NaT
if format is not None:
with pytest.raises(ValueError):
pd.to_datetime(value, errors='raise', format=format,
infer_datetime_format=infer)
else:
with pytest.raises(OutOfBoundsDatetime):
pd.to_datetime(value, errors='raise', format=format,
infer_datetime_format=infer)
@pytest.mark.parametrize('values', [["a"], ["00:01:99"],
["a", "b", "99:00:00"]])
@pytest.mark.parametrize('infer', [True, False])
@pytest.mark.parametrize('format', [None, 'H%:M%:S%'])
def test_datetime_invalid_index(self, values, format, infer):
# GH24763
res = pd.to_datetime(values, errors='ignore', format=format,
infer_datetime_format=infer)
tm.assert_index_equal(res, pd.Index(values))
res = pd.to_datetime(values, errors='coerce', format=format,
infer_datetime_format=infer)
tm.assert_index_equal(res, pd.DatetimeIndex([pd.NaT] * len(values)))
with pytest.raises(ValueError):
pd.to_datetime(values, errors='raise', format=format,
infer_datetime_format=infer)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ['%Y%m%d %H:%M:%S', None])
@pytest.mark.parametrize("box", [True, False])
@pytest.mark.parametrize("constructor", [list, tuple, np.array, pd.Index])
def test_to_datetime_cache(self, utc, format, box, constructor):
date = '20130101 00:00:00'
test_dates = [date] * 10**5
data = constructor(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, box=box,
cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, box=box,
cache=False)
if box:
tm.assert_index_equal(result, expected)
else:
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ['%Y%m%d %H:%M:%S', None])
def test_to_datetime_cache_series(self, utc, format):
date = '20130101 00:00:00'
test_dates = [date] * 10**5
data = pd.Series(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
def test_to_datetime_cache_scalar(self):
date = '20130101 00:00:00'
result = pd.to_datetime(date, cache=True)
expected = pd.Timestamp('20130101 00:00:00')
assert result == expected
@pytest.mark.parametrize('date, format',
[('2017-20', '%Y-%W'),
('20 Sunday', '%W %A'),
('20 Sun', '%W %a'),
('2017-21', '%Y-%U'),
('20 Sunday', '%U %A'),
('20 Sun', '%U %a')])
def test_week_without_day_and_calendar_year(self, date, format):
# GH16774
msg = "Cannot use '%W' or '%U' without day and year"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(date, format=format)
def test_iso_8601_strings_with_same_offset(self):
# GH 17697, 11736
ts_str = "2015-11-18 15:30:00+05:30"
result = to_datetime(ts_str)
expected = Timestamp(ts_str)
assert result == expected
expected = DatetimeIndex([Timestamp(ts_str)] * 2)
result = to_datetime([ts_str] * 2)
tm.assert_index_equal(result, expected)
result = DatetimeIndex([ts_str] * 2)
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_same_offset_no_box(self):
# GH 22446
data = ['2018-01-04 09:01:00+09:00', '2018-01-04 09:02:00+09:00']
result = pd.to_datetime(data, box=False)
expected = np.array([
datetime(2018, 1, 4, 9, 1, tzinfo=pytz.FixedOffset(540)),
datetime(2018, 1, 4, 9, 2, tzinfo=pytz.FixedOffset(540))
],
dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_iso_8601_strings_with_different_offsets(self):
# GH 17697, 11736
ts_strings = ["2015-11-18 15:30:00+05:30",
"2015-11-18 16:30:00+06:30",
NaT]
result = to_datetime(ts_strings)
expected = np.array([datetime(2015, 11, 18, 15, 30,
tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 16, 30,
tzinfo=tzoffset(None, 23400)),
NaT],
dtype=object)
# GH 21864
expected = Index(expected)
tm.assert_index_equal(result, expected)
result = to_datetime(ts_strings, utc=True)
expected = DatetimeIndex([Timestamp(2015, 11, 18, 10),
Timestamp(2015, 11, 18, 10),
NaT], tz='UTC')
tm.assert_index_equal(result, expected)
def test_iss8601_strings_mixed_offsets_with_naive(self):
# GH 24992
result = pd.to_datetime([
'2018-11-28T00:00:00',
'2018-11-28T00:00:00+12:00',
'2018-11-28T00:00:00',
'2018-11-28T00:00:00+06:00',
'2018-11-28T00:00:00'
], utc=True)
expected = pd.to_datetime([
'2018-11-28T00:00:00',
'2018-11-27T12:00:00',
'2018-11-28T00:00:00',
'2018-11-27T18:00:00',
'2018-11-28T00:00:00'
], utc=True)
tm.assert_index_equal(result, expected)
items = ['2018-11-28T00:00:00+12:00', '2018-11-28T00:00:00']
result = pd.to_datetime(items, utc=True)
expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1]
tm.assert_index_equal(result, expected)
def test_non_iso_strings_with_tz_offset(self):
result = to_datetime(['March 1, 2018 12:00:00+0400'] * 2)
expected = DatetimeIndex([datetime(2018, 3, 1, 12,
tzinfo=pytz.FixedOffset(240))] * 2)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('ts, expected', [
(Timestamp('2018-01-01'),
Timestamp('2018-01-01', tz='UTC')),
(Timestamp('2018-01-01', tz='US/Pacific'),
Timestamp('2018-01-01 08:00', tz='UTC'))])
def test_timestamp_utc_true(self, ts, expected):
# GH 24415
result = to_datetime(ts, utc=True)
assert result == expected
class TestToDatetimeUnit(object):
@pytest.mark.parametrize('cache', [True, False])
def test_unit(self, cache):
# GH 11758
        # test proper behavior with errors
with pytest.raises(ValueError):
to_datetime([1], unit='D', format='%Y%m%d', cache=cache)
values = [11111111, 1, 1.0, iNaT, NaT, np.nan,
'NaT', '']
result = to_datetime(values, unit='D', errors='ignore', cache=cache)
expected = Index([11111111, Timestamp('1970-01-02'),
Timestamp('1970-01-02'), NaT,
NaT, NaT, NaT, NaT],
dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, unit='D', errors='coerce', cache=cache)
expected = DatetimeIndex(['NaT', '1970-01-02', '1970-01-02',
'NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
tm.assert_index_equal(result, expected)
with pytest.raises(tslib.OutOfBoundsDatetime):
to_datetime(values, unit='D', errors='raise', cache=cache)
values = [1420043460000, iNaT, NaT, np.nan, 'NaT']
result = to_datetime(values, errors='ignore', unit='s', cache=cache)
expected = Index([1420043460000, NaT, NaT,
NaT, NaT], dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, errors='coerce', unit='s', cache=cache)
expected = DatetimeIndex(['NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
tm.assert_index_equal(result, expected)
with pytest.raises(tslib.OutOfBoundsDatetime):
to_datetime(values, errors='raise', unit='s', cache=cache)
# if we have a string, then we raise a ValueError
# and NOT an OutOfBoundsDatetime
for val in ['foo', Timestamp('20130101')]:
try:
to_datetime(val, errors='raise', unit='s', cache=cache)
except tslib.OutOfBoundsDatetime:
raise AssertionError("incorrect exception raised")
except ValueError:
pass
@pytest.mark.parametrize('cache', [True, False])
def test_unit_consistency(self, cache):
# consistency of conversions
expected = Timestamp('1970-05-09 14:25:11')
result = pd.to_datetime(11111111, unit='s', errors='raise',
cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit='s', errors='coerce',
cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit='s', errors='ignore',
cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
@pytest.mark.parametrize('cache', [True, False])
def test_unit_with_numeric(self, cache):
# GH 13180
# coercions from floats/ints are ok
expected = DatetimeIndex(['2015-06-19 05:33:20',
'2015-05-27 22:33:20'])
arr1 = [1.434692e+18, 1.432766e+18]
arr2 = np.array(arr1).astype('int64')
for errors in ['ignore', 'raise', 'coerce']:
result = pd.to_datetime(arr1, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
result = pd.to_datetime(arr2, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
# if we have ints/strings
expected = DatetimeIndex(['NaT',
'2015-06-19 05:33:20',
'2015-05-27 22:33:20'])
arr = ['foo', 1.434692e+18, 1.432766e+18]
result = pd.to_datetime(arr, errors='coerce', cache=cache)
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(['2015-06-19 05:33:20',
'2015-05-27 22:33:20',
'NaT',
'NaT'])
arr = [1.434692e+18, 1.432766e+18, 'foo', 'NaT']
result = pd.to_datetime(arr, errors='coerce', cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_unit_mixed(self, cache):
# mixed integers/datetimes
expected = DatetimeIndex(['2013-01-01', 'NaT', 'NaT'])
arr = [pd.Timestamp('20130101'), 1.434692e+18, 1.432766e+18]
result = pd.to_datetime(arr, errors='coerce', cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(arr, errors='raise', cache=cache)
expected = DatetimeIndex(['NaT',
'NaT',
'2013-01-01'])
arr = [1.434692e+18, 1.432766e+18, pd.Timestamp('20130101')]
result = pd.to_datetime(arr, errors='coerce', cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(arr, errors='raise', cache=cache)
@pytest.mark.parametrize('cache', [True, False])
def test_unit_rounding(self, cache):
# GH 14156: argument will incur floating point errors but no
# premature rounding
result = pd.to_datetime(1434743731.8770001, unit='s', cache=cache)
expected = pd.Timestamp('2015-06-19 19:55:31.877000093')
assert result == expected
@pytest.mark.parametrize('cache', [True, False])
def test_unit_ignore_keeps_name(self, cache):
# GH 21697
expected = pd.Index([15e9] * 2, name='name')
result = pd.to_datetime(expected, errors='ignore', box=True, unit='s',
cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_dataframe(self, cache):
df = DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5],
'hour': [6, 7],
'minute': [58, 59],
'second': [10, 11],
'ms': [1, 1],
'us': [2, 2],
'ns': [3, 3]})
result = to_datetime({'year': df['year'],
'month': df['month'],
'day': df['day']}, cache=cache)
expected = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160305 00:0:00')])
assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[['year', 'month', 'day']].to_dict(),
cache=cache)
assert_series_equal(result, expected)
# dict but with constructable
df2 = df[['year', 'month', 'day']].to_dict()
df2['month'] = 2
result = to_datetime(df2, cache=cache)
expected2 = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160205 00:0:00')])
assert_series_equal(result, expected2)
# unit mappings
units = [{'year': 'years',
'month': 'months',
'day': 'days',
'hour': 'hours',
'minute': 'minutes',
'second': 'seconds'},
{'year': 'year',
'month': 'month',
'day': 'day',
'hour': 'hour',
'minute': 'minute',
'second': 'second'},
]
for d in units:
result = to_datetime(df[list(d.keys())].rename(columns=d),
cache=cache)
expected = Series([Timestamp('20150204 06:58:10'),
Timestamp('20160305 07:59:11')])
assert_series_equal(result, expected)
d = {'year': 'year',
'month': 'month',
'day': 'day',
'hour': 'hour',
'minute': 'minute',
'second': 'second',
'ms': 'ms',
'us': 'us',
'ns': 'ns'}
result = to_datetime(df.rename(columns=d), cache=cache)
expected = Series([Timestamp('20150204 06:58:10.001002003'),
Timestamp('20160305 07:59:11.001002003')])
assert_series_equal(result, expected)
# coerce back to int
result = to_datetime(df.astype(str), cache=cache)
assert_series_equal(result, expected)
# passing coerce
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
msg = ("cannot assemble the datetimes: time data .+ does not "
r"match format '%Y%m%d' \(match\)")
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
result = to_datetime(df2, errors='coerce', cache=cache)
expected = Series([Timestamp('20150204 00:00:00'),
NaT])
assert_series_equal(result, expected)
# extra columns
msg = ("extra keys have been passed to the datetime assemblage: "
r"\[foo\]")
with pytest.raises(ValueError, match=msg):
df2 = df.copy()
df2['foo'] = 1
to_datetime(df2, cache=cache)
# not enough
msg = (r'to assemble mappings requires at least that \[year, month, '
r'day\] be specified: \[.+\] is missing')
for c in [['year'],
['year', 'month'],
['year', 'month', 'second'],
['month', 'day'],
['year', 'day', 'second']]:
with pytest.raises(ValueError, match=msg):
to_datetime(df[c], cache=cache)
# duplicates
msg = 'cannot assemble with duplicate keys'
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
df2.columns = ['year', 'year', 'day']
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5],
'hour': [4, 5]})
df2.columns = ['year', 'month', 'day', 'day']
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
@pytest.mark.parametrize('cache', [True, False])
def test_dataframe_dtypes(self, cache):
# #13451
df = DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
# int16
result = to_datetime(df.astype('int16'), cache=cache)
expected = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160305 00:00:00')])
assert_series_equal(result, expected)
# mixed dtypes
df['month'] = df['month'].astype('int8')
df['day'] = df['day'].astype('int8')
result = to_datetime(df, cache=cache)
expected = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160305 00:00:00')])
assert_series_equal(result, expected)
# float
df = DataFrame({'year': [2000, 2001],
'month': [1.5, 1],
'day': [1, 1]})
with pytest.raises(ValueError):
to_datetime(df, cache=cache)
def test_dataframe_box_false(self):
# GH 23760
df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
result = pd.to_datetime(df, box=False)
expected = np.array(['2015-02-04', '2016-03-05'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
def test_dataframe_utc_true(self):
# GH 23760
df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
result = pd.to_datetime(df, utc=True)
expected = pd.Series(np.array(['2015-02-04', '2016-03-05'],
dtype='datetime64[ns]')).dt.tz_localize('UTC')
tm.assert_series_equal(result, expected)
def test_to_datetime_errors_ignore_utc_true(self):
# GH 23758
result = pd.to_datetime([1], unit='s', box=True, utc=True,
errors='ignore')
expected = DatetimeIndex(['1970-01-01 00:00:01'], tz='UTC')
tm.assert_index_equal(result, expected)
class TestToDatetimeMisc(object):
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(['2262-04-11 23:47:16.854775808'], dtype=object)
with pytest.raises(OutOfBoundsDatetime):
to_datetime(arr)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_iso8601(self, cache):
result = to_datetime(["2012-01-01 00:00:00"], cache=cache)
exp = Timestamp("2012-01-01 00:00:00")
assert result[0] == exp
result = to_datetime(['20121001'], cache=cache) # bad iso 8601
exp = Timestamp('2012-10-01')
assert result[0] == exp
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_default(self, cache):
rs = to_datetime('2001', cache=cache)
xp = datetime(2001, 1, 1)
assert rs == xp
# dayfirst is essentially broken
# to_datetime('01-13-2012', dayfirst=True)
# pytest.raises(ValueError, to_datetime('01-13-2012',
# dayfirst=True))
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s, cache=cache)
assert result[0] == s[0]
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
s = Series(['10/18/2006', '10/18/2008', ' '])
msg = r"(\(u?')?String does not contain a date(:', ' '\))?"
with pytest.raises(ValueError, match=msg):
to_datetime(s, errors='raise', cache=cache)
result_coerce = to_datetime(s, errors='coerce', cache=cache)
expected_coerce = Series([datetime(2006, 10, 18),
datetime(2008, 10, 18),
NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
result_ignore = to_datetime(s, errors='ignore', cache=cache)
tm.assert_series_equal(result_ignore, s)
@td.skip_if_has_locale
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_with_apply(self, cache):
# this is only locale tested with US/None locales
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3])
expected = pd.to_datetime(td, format='%b %y', cache=cache)
result = td.apply(pd.to_datetime, format='%b %y', cache=cache)
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3])
msg = r"time data '' does not match format '%b %y' \(match\)"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(td, format='%b %y', errors='raise', cache=cache)
with pytest.raises(ValueError, match=msg):
td.apply(pd.to_datetime, format='%b %y',
errors='raise', cache=cache)
expected = pd.to_datetime(td, format='%b %y', errors='coerce',
cache=cache)
result = td.apply(
lambda x: pd.to_datetime(x, format='%b %y', errors='coerce',
cache=cache))
assert_series_equal(result, expected)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_types(self, cache):
# empty string
result = to_datetime('', cache=cache)
assert result is NaT
result = to_datetime(['', ''], cache=cache)
assert isna(result).all()
# ints
result = Timestamp(0)
expected = to_datetime(0, cache=cache)
assert result == expected
# GH 3888 (strings)
expected = to_datetime(['2012'], cache=cache)[0]
result = to_datetime('2012', cache=cache)
assert result == expected
# array = ['2012','20120101','20120101 12:01:01']
array = ['20120101', '20120101 12:01:01']
expected = list(to_datetime(array, cache=cache))
result = lmap(Timestamp, array)
tm.assert_almost_equal(result, expected)
# currently fails ###
# result = Timestamp('2012')
# expected = to_datetime('2012')
# assert result == expected
@pytest.mark.parametrize('cache', [True, False])
@pytest.mark.parametrize('box, klass', [
[True, Index],
[False, np.array]
])
def test_to_datetime_unprocessable_input(self, cache, box, klass):
# GH 4928
# GH 21864
result = to_datetime([1, '1'], errors='ignore', cache=cache, box=box)
expected = klass(np.array([1, '1'], dtype='O'))
tm.assert_equal(result, expected)
msg = "invalid string coercion to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([1, '1'], errors='raise', cache=cache, box=box)
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype('O')
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
with pytest.raises(OverflowError):
date_range(start='1/1/1700', freq='B', periods=100000)
@pytest.mark.parametrize('cache', [True, False])
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if isna(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)[0]
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings, cache=cache)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
# GH 10636, default is now 'raise'
msg = (r"\(u?'Unknown string format:', '1/100/2000'\)|"
"day is out of range for month")
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors='raise', cache=cache)
result = to_datetime(malformed, errors='ignore', cache=cache)
# GH 21864
expected = Index(malformed)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors='raise', cache=cache)
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000', cache=cache), np.nan,
to_datetime('1/3/2000', cache=cache), np.nan,
to_datetime('1/5/2000', cache=cache)],
index=idx, name='foo')
result = to_datetime(series, cache=cache)
dresult = to_datetime(dseries, cache=cache)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isna(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x, cache=cache)
assert_series_equal(result, expected, check_names=False)
assert result.name == 'foo'
assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == 'foo'
@pytest.mark.parametrize('dtype', [
'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]',
'datetime64[us]', 'datetime64[ns]'])
@pytest.mark.parametrize('cache', [True, False])
def test_dti_constructor_numpy_timeunits(self, cache, dtype):
# GH 9114
base = pd.to_datetime(['2000-01-01T00:00', '2000-01-02T00:00', 'NaT'],
cache=cache)
values = base.values.astype(dtype)
tm.assert_index_equal(DatetimeIndex(values), base)
tm.assert_index_equal(to_datetime(values, cache=cache), base)
@pytest.mark.parametrize('cache', [True, False])
def test_dayfirst(self, cache):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True, cache=cache)
idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
class TestGuessDatetimeFormat(object):
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array(self):
expected_format = '%Y-%m-%d %H:%M:%S.%f'
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype='O'),
np.array([np.nan, np.nan, dt_string], dtype='O'),
np.array([dt_string, 'random_string'], dtype='O'),
]
for test_array in test_arrays:
assert tools._guess_datetime_format_for_array(
test_array) == expected_format
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array(
[np.nan, np.nan, np.nan], dtype='O'))
assert format_for_string_of_nans is None
class TestToDatetimeInferFormat(object):
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
s = pd.Series(pd.date_range('20000101', periods=50, freq='H'))
test_formats = ['%m-%d-%Y', '%m/%d/%Y %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%S.%f']
for test_format in test_formats:
s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
with_format = pd.to_datetime(s_as_dt_strings, format=test_format,
cache=cache)
no_infer = pd.to_datetime(s_as_dt_strings,
infer_datetime_format=False,
cache=cache)
yes_infer = pd.to_datetime(s_as_dt_strings,
infer_datetime_format=True,
cache=cache)
            # Whether the format is explicitly passed, inferred, or not
            # inferred at all, the results should all be the same
tm.assert_series_equal(with_format, no_infer)
tm.assert_series_equal(no_infer, yes_infer)
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_infer_datetime_format_inconsistent_format(self,
cache):
s = pd.Series(np.array(['01/01/2011 00:00:00',
'01-02-2011 00:00:00',
'2011-01-03T00:00:00']))
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False,
cache=cache),
pd.to_datetime(s, infer_datetime_format=True,
cache=cache))
s = pd.Series(np.array(['Jan/01/2011', 'Feb/01/2011', 'Mar/01/2011']))
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False,
cache=cache),
pd.to_datetime(s, infer_datetime_format=True,
cache=cache))
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
s = pd.Series(np.array(['01/01/2011 00:00:00', np.nan,
'01/03/2011 00:00:00', np.nan]))
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False,
cache=cache),
pd.to_datetime(s, infer_datetime_format=True,
cache=cache))
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_infer_datetime_format_series_start_with_nans(self,
cache):
s = pd.Series(np.array([np.nan, np.nan, '01/01/2011 00:00:00',
'01/02/2011 00:00:00', '01/03/2011 00:00:00']))
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False,
cache=cache),
pd.to_datetime(s, infer_datetime_format=True,
cache=cache))
@pytest.mark.parametrize('cache', [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
s = pd.Series(['2014-1-1', '2014-2-2', '2015-3-3'])
expected = pd.Series([pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-02-02'),
pd.Timestamp('2015-03-03')])
tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected)
tm.assert_series_equal(pd.to_datetime(s, format='%Y-%m-%d',
cache=cache), expected)
class TestDaysInMonth(object):
# tests for issue #10154
@pytest.mark.parametrize('cache', [True, False])
def test_day_not_in_month_coerce(self, cache):
assert isna(to_datetime('2015-02-29', errors='coerce', cache=cache))
assert isna(to_datetime('2015-02-29', format="%Y-%m-%d",
errors='coerce', cache=cache))
assert isna(to_datetime('2015-02-32', format="%Y-%m-%d",
errors='coerce', cache=cache))
assert isna(to_datetime('2015-04-31', format="%Y-%m-%d",
errors='coerce', cache=cache))
@pytest.mark.parametrize('cache', [True, False])
def test_day_not_in_month_raise(self, cache):
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime('2015-02-29', errors='raise', cache=cache)
msg = "time data 2015-02-29 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime('2015-02-29', errors='raise', format="%Y-%m-%d",
cache=cache)
msg = "time data 2015-02-32 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime('2015-02-32', errors='raise', format="%Y-%m-%d",
cache=cache)
msg = "time data 2015-04-31 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime('2015-04-31', errors='raise', format="%Y-%m-%d",
cache=cache)
@pytest.mark.parametrize('cache', [True, False])
def test_day_not_in_month_ignore(self, cache):
assert to_datetime('2015-02-29', errors='ignore',
cache=cache) == '2015-02-29'
assert to_datetime('2015-02-29', errors='ignore',
format="%Y-%m-%d", cache=cache) == '2015-02-29'
assert to_datetime('2015-02-32', errors='ignore',
format="%Y-%m-%d", cache=cache) == '2015-02-32'
assert to_datetime('2015-04-31', errors='ignore',
format="%Y-%m-%d", cache=cache) == '2015-04-31'
class TestDatetimeParsingWrappers(object):
@pytest.mark.parametrize('date_str,expected', list({
'2011-01-01': datetime(2011, 1, 1),
'2Q2005': datetime(2005, 4, 1),
'2Q05': datetime(2005, 4, 1),
'2005Q1': datetime(2005, 1, 1),
'05Q1': datetime(2005, 1, 1),
'2011Q3': datetime(2011, 7, 1),
'11Q3': datetime(2011, 7, 1),
'3Q2011': datetime(2011, 7, 1),
'3Q11': datetime(2011, 7, 1),
# quarterly without space
'2000Q4': datetime(2000, 10, 1),
'00Q4': datetime(2000, 10, 1),
'4Q2000': datetime(2000, 10, 1),
'4Q00': datetime(2000, 10, 1),
'2000q4': datetime(2000, 10, 1),
'2000-Q4': datetime(2000, 10, 1),
'00-Q4': datetime(2000, 10, 1),
'4Q-2000': datetime(2000, 10, 1),
'4Q-00': datetime(2000, 10, 1),
'00q4': datetime(2000, 10, 1),
'2005': datetime(2005, 1, 1),
'2005-11': datetime(2005, 11, 1),
'2005 11': datetime(2005, 11, 1),
'11-2005': datetime(2005, 11, 1),
'11 2005': datetime(2005, 11, 1),
'200511': datetime(2020, 5, 11),
'20051109': datetime(2005, 11, 9),
'20051109 10:15': datetime(2005, 11, 9, 10, 15),
'20051109 08H': datetime(2005, 11, 9, 8, 0),
'2005-11-09 10:15': datetime(2005, 11, 9, 10, 15),
'2005-11-09 08H': datetime(2005, 11, 9, 8, 0),
'2005/11/09 10:15': datetime(2005, 11, 9, 10, 15),
'2005/11/09 08H': datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10, 36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
        # GH 10537
'2014-06': datetime(2014, 6, 1),
'06-2014': datetime(2014, 6, 1),
'2014-6': datetime(2014, 6, 1),
'6-2014': datetime(2014, 6, 1),
'20010101 12': datetime(2001, 1, 1, 12),
'20010101 1234': datetime(2001, 1, 1, 12, 34),
'20010101 123456': datetime(2001, 1, 1, 12, 34, 56)}.items()))
@pytest.mark.parametrize('cache', [True, False])
def test_parsers(self, date_str, expected, cache):
# dateutil >= 2.5.0 defaults to yearfirst=True
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
result1, _, _ = parsing.parse_time_string(date_str,
yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(np.array([date_str], dtype=object),
yearfirst=yearfirst, cache=cache)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([pd.Timestamp(expected)])
tm.assert_index_equal(res, exp)
# these really need to have yearfirst, but we don't support
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq='S', periods=1,
yearfirst=yearfirst)
assert result7 == expected
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
result1, _, _ = parsing.parse_time_string('NaT')
result2 = to_datetime('NaT')
result3 = Timestamp('NaT')
result4 = DatetimeIndex(['NaT'])[0]
assert result1 is NaT
assert result2 is NaT
assert result3 is NaT
assert result4 is NaT
@pytest.mark.parametrize('cache', [True, False])
def test_parsers_dayfirst_yearfirst(self, cache):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
is_lt_253 = LooseVersion(dateutil.__version__) < LooseVersion('2.5.3')
# str : dayfirst, yearfirst, expected
cases = {'10-11-12': [(False, False,
datetime(2012, 10, 11)),
(True, False,
datetime(2012, 11, 10)),
(False, True,
datetime(2010, 11, 12)),
(True, True,
datetime(2010, 12, 11))],
'20/12/21': [(False, False,
datetime(2021, 12, 20)),
(True, False,
datetime(2021, 12, 20)),
(False, True,
datetime(2020, 12, 21)),
(True, True,
datetime(2020, 12, 21))]}
for date_str, values in compat.iteritems(cases):
for dayfirst, yearfirst, expected in values:
# odd comparisons across version
# let's just skip
if dayfirst and yearfirst and is_lt_253:
continue
# compare with dateutil result
dateutil_result = parse(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
assert dateutil_result == expected
result1, _, _ = parsing.parse_time_string(date_str,
dayfirst=dayfirst,
yearfirst=yearfirst)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(date_str, dayfirst=dayfirst,
yearfirst=yearfirst, cache=cache)
result4 = DatetimeIndex([date_str], dayfirst=dayfirst,
yearfirst=yearfirst)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
@pytest.mark.parametrize('cache', [True, False])
def test_parsers_timestring(self, cache):
# must be the same as dateutil result
cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)),
'9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))}
for date_str, (exp_now, exp_def) in compat.iteritems(cases):
result1, _, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string returns a datetime on the default date;
            # the others use the current date, and that cannot be changed
            # because it is used in time series plotting
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
@td.skip_if_has_locale
def test_parsers_time(self):
# GH11818
strings = ["14:15", "1415", "2:15pm", "0215pm", "14:15:00", "141500",
"2:15:00pm", "021500pm", time(14, 15)]
expected = time(14, 15)
for time_string in strings:
assert tools.to_time(time_string) == expected
new_string = "14.15"
msg = r"Cannot convert arg \['14\.15'\] to a time"
with pytest.raises(ValueError, match=msg):
tools.to_time(new_string)
assert tools.to_time(new_string, format="%H.%M") == expected
arg = ["14:15", "20:20"]
expected_arr = [time(14, 15), time(20, 20)]
assert tools.to_time(arg) == expected_arr
assert tools.to_time(arg, format="%H:%M") == expected_arr
assert tools.to_time(arg, infer_time_format=True) == expected_arr
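        # "%I:%M%p" (12-hour clock with AM/PM) does not match these 24-hour
        # strings, so errors="coerce" yields None for each element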
assert tools.to_time(arg, format="%I:%M%p",
errors="coerce") == [None, None]
res = tools.to_time(arg, format="%I:%M%p", errors="ignore")
tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))
with pytest.raises(ValueError):
tools.to_time(arg, format="%I:%M%p", errors="raise")
tm.assert_series_equal(tools.to_time(Series(arg, name="test")),
Series(expected_arr, name="test"))
res = tools.to_time(np.array(arg))
assert isinstance(res, list)
assert res == expected_arr
@pytest.mark.parametrize('cache', [True, False])
@pytest.mark.parametrize('dt_string, tz, dt_string_repr', [
('2013-01-01 05:45+0545', pytz.FixedOffset(345),
"Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')"),
('2013-01-01 05:30+0530', pytz.FixedOffset(330),
"Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')")])
def test_parsers_timezone_minute_offsets_roundtrip(self, cache, dt_string,
tz, dt_string_repr):
# GH11708
base = to_datetime("2013-01-01 00:00:00", cache=cache)
base = base.tz_localize('UTC').tz_convert(tz)
dt_time = to_datetime(dt_string, cache=cache)
assert base == dt_time
assert dt_string_repr == repr(dt_time)
@pytest.fixture(params=['D', 's', 'ms', 'us', 'ns'])
def units(request):
"""Day and some time units.
* D
* s
* ms
* us
* ns
"""
return request.param
@pytest.fixture
def epoch_1960():
"""Timestamp at 1960-01-01."""
return Timestamp('1960-01-01')
@pytest.fixture
def units_from_epochs():
return list(range(5))
@pytest.fixture(params=['timestamp', 'pydatetime', 'datetime64', 'str_1960'])
def epochs(epoch_1960, request):
"""Timestamp at 1960-01-01 in various forms.
* pd.Timestamp
* datetime.datetime
* numpy.datetime64
* str
"""
assert request.param in {'timestamp', 'pydatetime', 'datetime64',
"str_1960"}
if request.param == 'timestamp':
return epoch_1960
elif request.param == 'pydatetime':
return epoch_1960.to_pydatetime()
elif request.param == "datetime64":
return epoch_1960.to_datetime64()
else:
return str(epoch_1960)
@pytest.fixture
def julian_dates():
return pd.date_range('2014-1-1', periods=10).to_julian_date().values
class TestOrigin(object):
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
# for origin as julian
result = Series(pd.to_datetime(
julian_dates, unit='D', origin='julian'))
expected = Series(pd.to_datetime(
julian_dates - pd.Timestamp(0).to_julian_date(), unit='D'))
assert_series_equal(result, expected)
result = Series(pd.to_datetime(
[0, 1, 2], unit='D', origin='unix'))
expected = Series([Timestamp('1970-01-01'),
Timestamp('1970-01-02'),
Timestamp('1970-01-03')])
assert_series_equal(result, expected)
# default
result = Series(pd.to_datetime(
[0, 1, 2], unit='D'))
expected = Series([Timestamp('1970-01-01'),
Timestamp('1970-01-02'),
Timestamp('1970-01-03')])
assert_series_equal(result, expected)
def test_julian_round_trip(self):
result = pd.to_datetime(2456658, origin='julian', unit='D')
assert result.to_julian_date() == 2456658
# out-of-bounds
with pytest.raises(ValueError):
pd.to_datetime(1, origin="julian", unit='D')
def test_invalid_unit(self, units, julian_dates):
# checking for invalid combination of origin='julian' and unit != D
if units != 'D':
with pytest.raises(ValueError):
pd.to_datetime(julian_dates, unit=units, origin='julian')
def test_invalid_origin(self):
# need to have a numeric specified
with pytest.raises(ValueError):
pd.to_datetime("2005-01-01", origin="1960-01-01")
with pytest.raises(ValueError):
pd.to_datetime("2005-01-01", origin="1960-01-01", unit='D')
def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
expected = Series(
[pd.Timedelta(x, unit=units) +
epoch_1960 for x in units_from_epochs])
result = Series(pd.to_datetime(
units_from_epochs, unit=units, origin=epochs))
assert_series_equal(result, expected)
@pytest.mark.parametrize("origin, exc",
[('random_string', ValueError),
('epoch', ValueError),
('13-24-1990', ValueError),
(datetime(1, 1, 1), tslib.OutOfBoundsDatetime)])
def test_invalid_origins(self, origin, exc, units, units_from_epochs):
with pytest.raises(exc):
pd.to_datetime(units_from_epochs, unit=units,
origin=origin)
def test_invalid_origins_tzinfo(self):
# GH16842
with pytest.raises(ValueError):
pd.to_datetime(1, unit='D',
origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
result = pd.to_datetime(200 * 365, unit='D')
expected = Timestamp('2169-11-13 00:00:00')
assert result == expected
result = pd.to_datetime(200 * 365, unit='D', origin='1870-01-01')
expected = Timestamp('2069-11-13 00:00:00')
assert result == expected
result = pd.to_datetime(300 * 365, unit='D', origin='1870-01-01')
expected = Timestamp('2169-10-20 00:00:00')
assert result == expected
| bsd-3-clause |
jvahala/Space-Doodles | doodleverse_main/final_submission/doodleverse.py | 1 | 4519 |
import numpy as np
import search
import clustersearch
import feature_extract as f_e
import matplotlib.pyplot as plt
from matplotlib import image as mpimg
import cv2
def main():
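    # Extract feature points from the doodle image, match them against the
    # star table with clustersearch.Search, then plot and score the result.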
plt.close("all")
[feature_points, bestpoint] = get_features('mj.png')
bestpoint = int(bestpoint)
print(feature_points,bestpoint)
star_tab = clustersearch.StarTable()
featpoints = []
for point in feature_points:
featpoints.append(search.Point(point))
featset = search.SetOfPoints()
featset.append(featpoints)
mag_constant = 0.5
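    # magnitude constant passed to clustersearch.Search (assumed to weight
    # star brightness when scoring candidate matches)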
searchdata = clustersearch.Search(star_tab,featset, bestpoint, mag_constant)
clustersearch.PlotEverything(searchdata)
print('Average mag is:',searchdata.avgmag)
print('Score is:',searchdata.score)
def get_features(image_name):
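    # Returns the optimized contour feature points and the index of the
    # best-scoring feature, which main() passes to the search as 'bestpoint'.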
#import image as black/white
#example shapes: shape1.png (odd,no int), shape2.png (odd,no int), shape3.png (rounded, no int)
raw_img, image, contours, hierarchy = f_e.importImage(image_name)
img_main = mpimg.imread(image_name)
    cnt1 = contours[1] #contour zero is the border, contour 1 is the outermost contour, etc.
cnt = f_e.cleanContour(cnt1)
#create grayscale (uint8) all white image to draw features onto
draw_img = f_e.drawImage(raw_img,cnt)
#grab extreme points of contour
extrema = f_e.getExtrema(cnt)
#use harris corner detector
corners = f_e.getCorners(draw_img,50,0.01,50)
features = f_e.orderFeatures(cnt,extrema,corners)
#consolidate features
add_threshold = 0.01 #smaller values add more points (0.01 default)
remove_threshold = 0.01 #larger values remove more points (0.01 default)
clumpThresh = -100 #set negative to base the threshold on 1/4 of the best feature value; otherwise 70+ is a good value, higher values mean fewer points
n = 20 #number of divisions for determining normalized error (5 default)
index = 0 #default starting index (0 default)
count = 0
#find feature location on contour
new_features = f_e.featuresOnContour(features, cnt)
#add a bunch of features
new_features = f_e.addFeatures(index,new_features,cnt,n,add_threshold)
new_features = f_e.addFeatures(index,new_features,cnt,n,add_threshold*0.1)
#remove them slowly
new_features = f_e.removeMidpoints(index,new_features,cnt,n,remove_threshold)
new_features = f_e.removeMidpoints(index,new_features,cnt,n+10,remove_threshold*20)
new_features = f_e.removeMidpoints(index,new_features,cnt,n+20,remove_threshold*50)
#finalize features
best_features_sorted = f_e.findKeyFeatures(new_features)
new_features, best_features_sorted = f_e.removeClumps(new_features,best_features_sorted,clumpThresh)
best_features = new_features[:,0:2]
best_features.shape = (best_features.shape[0],2)
#best_features[:,[0, 1]] = best_features[:,[1, 0]] #switches columns
#plot feature points
fig1 = plt.figure(1)
plt.subplot(221)
plt.imshow(img_main)
plt.title('(a) Original Image', fontsize=10)
frame = plt.gca()
frame.axes.get_xaxis().set_ticks([])
frame.axes.get_yaxis().set_ticks([])
plt.subplot(222)
plt.imshow(draw_img.squeeze(),cmap='Greys')
plt.title('(b) Contour', fontsize=10)
frame = plt.gca()
frame.axes.get_xaxis().set_ticks([])
frame.axes.get_yaxis().set_ticks([])
plt.subplot(223)
plt.imshow(draw_img.squeeze(),cmap='Greys')
plt.hold(True)
plt.scatter(features[:,0],features[:,1],s=20,c='b',marker='x')
plt.plot(features[:,0],features[:,1])
plt.title('(c) Harris Corner Detector Features', fontsize=10)
plt.axis('image')
frame = plt.gca()
frame.axes.get_xaxis().set_ticks([])
frame.axes.get_yaxis().set_ticks([])
plt.subplot(224)
plt.imshow(draw_img.squeeze(),cmap='Greys')
plt.hold(True)
plt.scatter(new_features[:,0],new_features[:,1],s=20,c='r',marker='x')
plt.plot(new_features[:,0],new_features[:,1],'r-')
best_index = best_features_sorted[0,1]
best_triangle = new_features[(best_index-1):(best_index+2),:]
plt.scatter(best_triangle[:,0],best_triangle[:,1],s=30,facecolors='none',edgecolors='g',marker='o')
plt.title('(d) Optimized Features', fontsize=10)
plt.axis('image')
frame = plt.gca()
frame.axes.get_xaxis().set_ticks([])
frame.axes.get_yaxis().set_ticks([])
plt.show()
return [best_features, best_features_sorted[0,1]]
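#------------------------------------------------------------------------------
#Illustrative sketch (not part of the original script): the corner detection
#above is delegated to feature_extract.getCorners(draw_img,50,0.01,50). A
#comparable standalone call with OpenCV, assuming the arguments mean
#(max corners, quality level, min distance) and that a goodFeaturesToTrack-style
#detector (possibly the Harris variant) is what getCorners wraps, might look
#like this.
def _demo_corner_detection():
    #synthetic grayscale image with one bright square, so corners are easy to find
    img = np.zeros((100, 100), dtype=np.uint8)
    img[30:70, 30:70] = 255
    #50 corners max, quality level 0.01, minimum distance of 10 px between corners
    corners = cv2.goodFeaturesToTrack(img, 50, 0.01, 10)
    return corners.reshape(-1, 2)  #(x, y) coordinates of the detected corners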
if __name__ == '__main__': main() | apache-2.0 |
weidel-p/nest-simulator | pynest/examples/vinit_example.py | 6 | 3192 | # -*- coding: utf-8 -*-
#
# vinit_example.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Initial membrane voltage
----------------------------
Plot several runs of the ``iaf_cond_exp_sfa_rr`` neuron without input for various
initial values of the membrane potential.
References
~~~~~~~~~~~~
.. [1] Dayan, P. and Abbott, L.F. (2001) Theoretical neuroscience,
MIT Press, page 166
"""
###############################################################################
# First, the necessary modules for simulation and plotting are imported.
import nest
import numpy
import matplotlib.pyplot as plt
###############################################################################
# A loop runs over a range of initial membrane voltages.
#
# In the beginning of each iteration, the simulation kernel is put back to
# its initial state using `ResetKernel`.
#
# Next, a neuron is instantiated with ``Create``. The used neuron model
# ``iaf_cond_exp_sfa_rr`` is an implementation of a spiking neuron with
# integrate-and-fire dynamics, conductance-based synapses, an additional
# spike-frequency adaptation and relative refractory mechanisms as described
# in [1]_. Incoming spike events induce a post-synaptic change of
# conductance modeled by an exponential function. ``SetStatus`` is used to
# assign the initial membrane voltage of the current loop run to the neuron.
#
# ``Create`` is used once more to instantiate a ``voltmeter`` as recording device
# which is subsequently connected to the neuron with ``Connect``.
#
# Then, a simulation with a duration of 75 ms is started with ``Simulate``.
#
# When the simulation has finished, the recorded times and membrane voltages
# are read from the voltmeter via ``GetStatus`` where they can be accessed
# through the key ``events`` of the status dictionary.
#
# Finally, the time course of the membrane voltages is plotted for each of
# the different initial values.
for vinit in numpy.arange(-100, -50, 10, float):
nest.ResetKernel()
cbn = nest.Create("iaf_cond_exp_sfa_rr")
nest.SetStatus(cbn, "V_m", vinit)
voltmeter = nest.Create("voltmeter")
nest.Connect(voltmeter, cbn)
nest.Simulate(75.0)
t = voltmeter.get("events", "times")
v = voltmeter.get("events", "V_m")
plt.plot(t, v, label="initial V_m = %.2f mV" % vinit)
###############################################################################
# Set the legend and the labels for the plot outside of the loop.
plt.legend(loc=4)
plt.xlabel("time (ms)")
plt.ylabel("V_m (mV)")
plt.show()
| gpl-2.0 |
google-research/google-research | ieg/dataset_utils/datasets.py | 1 | 14314 | # coding=utf-8
"""Loader for datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from absl import flags
from ieg.dataset_utils.utils import cifar_process
from ieg.dataset_utils.utils import imagenet_preprocess_image
import numpy as np
import sklearn.metrics as sklearn_metrics
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
def verbose_data(which_set, data, label):
"""Prints the number of data per class for a dataset.
Args:
which_set: a str
data: A numpy 4D array
label: A numpy array
"""
text = ['{} size: {}'.format(which_set, data.shape[0])]
for i in range(label.max() + 1):
text.append('class{}-{}'.format(i, len(np.where(label == i)[0])))
text.append('\n')
text = ' '.join(text)
tf.logging.info(text)
def shuffle_dataset(data, label, others=None, class_balanced=False):
"""Shuffles the dataset with class balancing option.
Args:
data: A numpy 4D array.
label: A numpy array.
others: Optional array corresponded with data and label.
    class_balanced: If True, after shuffling, samples from different classes are
      interleaved so the classes stay balanced ([1,2,3,...,1,2,3,...]).
Returns:
Shuffled inputs.
"""
if class_balanced:
sorted_ids = []
for i in range(label.max() + 1):
tmp_ids = np.where(label == i)[0]
np.random.shuffle(tmp_ids)
sorted_ids.append(tmp_ids)
sorted_ids = np.stack(sorted_ids, 0)
sorted_ids = np.transpose(sorted_ids, axes=[1, 0])
ids = np.reshape(sorted_ids, (-1,))
else:
ids = np.arange(data.shape[0])
np.random.shuffle(ids)
if others is None:
return data[ids], label[ids]
else:
return data[ids], label[ids], others[ids]
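# --- Illustrative sketch (not part of the original module) ------------------
# A minimal, hedged example of the class_balanced branch of shuffle_dataset:
# labels come out interleaved as 0,1,2,0,1,2,... The toy arrays below are
# invented purely for the demo.
def _demo_shuffle_dataset_balanced():
  data = np.arange(12).reshape(12, 1)  # 12 fake one-pixel "images"
  label = np.array([0] * 4 + [1] * 4 + [2] * 4)  # 4 samples per class
  shuffled_data, shuffled_label = shuffle_dataset(
      data, label, class_balanced=True)
  # Every consecutive block of 3 labels holds one sample of each class.
  print(shuffled_label.reshape(-1, 3))
  return shuffled_data, shuffled_label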
def load_asymmetric(x, y, noise_ratio, n_val, random_seed=12345):
"""Create asymmetric noisy data."""
def _generate_asymmetric_noise(y_train, n):
"""Generate cifar10 asymmetric label noise.
Asymmetric noise confuses
automobile <- truck
bird -> airplane
cat <-> dog
deer -> horse
Args:
y_train: label numpy tensor
n: noise ratio
Returns:
corrupted y_train.
"""
assert y_train.max() == 10 - 1
classes = 10
p = np.eye(classes)
# automobile <- truck
p[9, 9], p[9, 1] = 1. - n, n
# bird -> airplane
p[2, 2], p[2, 0] = 1. - n, n
# cat <-> dog
p[3, 3], p[3, 5] = 1. - n, n
p[5, 5], p[5, 3] = 1. - n, n
    # deer -> horse
p[4, 4], p[4, 7] = 1. - n, n
tf.logging.info('Asymmetric corruption p:\n {}'.format(p))
noise_y = y_train.copy()
r = np.random.RandomState(random_seed)
for i in range(noise_y.shape[0]):
c = y_train[i]
s = r.multinomial(1, p[c, :], 1)[0]
noise_y[i] = np.where(s == 1)[0]
actual_noise = (noise_y != y_train).mean()
assert actual_noise > 0.0
return noise_y
n_img = x.shape[0]
n_classes = 10
# holdout balanced clean
val_idx = []
if n_val > 0:
for cc in range(n_classes):
tmp_idx = np.where(y == cc)[0]
val_idx.append(
np.random.choice(tmp_idx, n_val // n_classes, replace=False))
val_idx = np.concatenate(val_idx, axis=0)
train_idx = list(set([a for a in range(n_img)]).difference(set(val_idx)))
if n_val > 0:
valdata, vallabel = x[val_idx], y[val_idx]
traindata, trainlabel = x[train_idx], y[train_idx]
trainlabel = trainlabel.squeeze()
label_corr_train = trainlabel.copy()
trainlabel = _generate_asymmetric_noise(trainlabel, noise_ratio)
if len(trainlabel.shape) == 1:
trainlabel = np.reshape(trainlabel, [trainlabel.shape[0], 1])
traindata, trainlabel, label_corr_train = shuffle_dataset(
traindata, trainlabel, label_corr_train)
if n_val > 0:
valdata, vallabel = shuffle_dataset(valdata, vallabel, class_balanced=True)
else:
valdata, vallabel = None, None
return (traindata, trainlabel, label_corr_train), (valdata, vallabel)
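# --- Illustrative sketch (not part of the original module) ------------------
# Hedged sketch of the row-stochastic transition-matrix idea used by
# _generate_asymmetric_noise: each true label c is resampled from row p[c, :].
# The 3-class matrix, noise level and sample count here are made up.
def _demo_transition_matrix_noise(noise=0.3, seed=0):
  rng = np.random.RandomState(seed)
  p = np.eye(3)
  p[0, 0], p[0, 1] = 1. - noise, noise  # class 0 flips to class 1
  y_true = rng.randint(0, 3, size=1000)
  y_noisy = y_true.copy()
  for i, c in enumerate(y_true):
    y_noisy[i] = np.argmax(rng.multinomial(1, p[c, :]))
  observed = (y_noisy[y_true == 0] == 1).mean()
  print('observed 0->1 flip rate: %.3f (target %.2f)' % (observed, noise))
  return y_noisy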
def load_train_val_uniform_noise(x, y, n_classes, n_val, noise_ratio):
"""Make noisy data and holdout a clean val data.
Constructs training and validation datasets, with controllable amount of
noise ratio.
Args:
x: 4D numpy array of images
y: 1D/2D numpy array of labels of images
n_classes: The number of classes.
n_val: The number of validation data to holdout from train.
noise_ratio: A float number that decides the random noise ratio.
Returns:
traindata: Train data.
trainlabel: Train noisy label.
label_corr_train: True clean label.
valdata: Validation data.
vallabel: Validation label.
"""
n_img = x.shape[0]
val_idx = []
if n_val > 0:
# Splits a clean holdout set
for cc in range(n_classes):
tmp_idx = np.where(y == cc)[0]
val_idx.append(
np.random.choice(tmp_idx, n_val // n_classes, replace=False))
val_idx = np.concatenate(val_idx, axis=0)
train_idx = list(set([a for a in range(n_img)]).difference(set(val_idx)))
# split validation set
if n_val > 0:
valdata, vallabel = x[val_idx], y[val_idx]
traindata, trainlabel = x[train_idx], y[train_idx]
# Copies the true label for verification
label_corr_train = trainlabel.copy()
# Adds uniform noises
mask = np.random.rand(len(trainlabel)) <= noise_ratio
random_labels = np.random.choice(n_classes, mask.sum())
trainlabel[mask] = random_labels[Ellipsis, np.newaxis]
# Shuffles dataset
traindata, trainlabel, label_corr_train = shuffle_dataset(
traindata, trainlabel, label_corr_train)
if n_val > 0:
valdata, vallabel = shuffle_dataset(valdata, vallabel, class_balanced=True)
else:
valdata, vallabel = None, None
return (traindata, trainlabel, label_corr_train), (valdata, vallabel)
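# --- Illustrative sketch (not part of the original module) ------------------
# Hedged sketch of the uniform-noise step above: a Bernoulli mask selects
# roughly noise_ratio of the labels and replaces them with random classes.
# As in load_train_val_uniform_noise, a replacement can coincide with the
# true label, so the effective corruption rate is slightly below noise_ratio.
def _demo_uniform_noise(noise_ratio=0.4, n_classes=10, seed=0):
  rng = np.random.RandomState(seed)
  y = rng.randint(0, n_classes, size=2000)
  y_noisy = y.copy()
  mask = rng.rand(len(y)) <= noise_ratio
  y_noisy[mask] = rng.choice(n_classes, mask.sum())
  print('actual corruption: %.3f' % (y_noisy != y).mean())
  return y_noisy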
class CIFAR(object):
"""CIFAR dataset class."""
def __init__(self):
self.dataset_name = FLAGS.dataset
self.is_cifar100 = 'cifar100' in self.dataset_name
if self.is_cifar100:
self.num_classes = 100
else:
self.num_classes = 10
self.noise_ratio = float(self.dataset_name.split('_')[-1])
assert self.noise_ratio >= 0 and self.noise_ratio <= 1,\
'The schema {} of dataset is not right'.format(self.dataset_name)
self.split_probe = FLAGS.probe_dataset_hold_ratio != 0
def create_loader(self):
"""Creates loader as tf.data.Dataset."""
# load data to memory.
if self.is_cifar100:
(x_train, y_train), (x_test,
y_test) = tf.keras.datasets.cifar100.load_data()
else:
(x_train, y_train), (x_test,
y_test) = tf.keras.datasets.cifar10.load_data()
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
x_train, y_train = shuffle_dataset(x_train, y_train)
n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))
# TODO(zizhaoz): add other noise types.
if 'asymmetric' in self.dataset_name:
assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'
(x_train, y_train, y_gold), (x_probe, y_probe) = load_asymmetric(
x_train,
y_train,
noise_ratio=self.noise_ratio,
n_val=n_probe,
random_seed=FLAGS.seed)
elif 'uniform' in self.dataset_name:
(x_train, y_train, y_gold), (x_probe,
y_probe) = load_train_val_uniform_noise(
x_train,
y_train,
n_classes=self.num_classes,
noise_ratio=self.noise_ratio,
n_val=n_probe)
else:
assert self.dataset_name in ['cifar10', 'cifar100']
if not self.split_probe and x_probe is not None:
# Usually used for supervised comparison.
tf.logging.info('Merge train and probe')
x_train = np.concatenate([x_train, x_probe], axis=0)
y_train = np.concatenate([y_train, y_probe], axis=0)
y_gold = np.concatenate([y_gold, y_probe], axis=0)
conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)
conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)
    tf.logging.info('Corrupted confusion matrix\n {}'.format(conf_mat))
x_test, y_test = shuffle_dataset(x_test, y_test)
self.train_dataset_size = x_train.shape[0]
self.val_dataset_size = x_test.shape[0]
if self.split_probe:
self.probe_size = x_probe.shape[0]
input_tuple = (x_train, y_train.squeeze())
self.train_dataflow = self.create_ds(input_tuple, is_train=True)
self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),
is_train=False)
if self.split_probe:
self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),
is_train=True)
tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))
verbose_data('train', x_train, y_train)
verbose_data('test', x_test, y_test)
if self.split_probe:
verbose_data('probe', x_probe, y_probe)
return self
def create_ds(self, data, is_train=True):
"""Creates tf.data object given data.
Args:
data: data in format of tuple, e.g. (data, label)
      is_train: bool indicating whether this is the training split; it controls
        the preprocessing applied by cifar_process.
Returns:
An tf.data.Dataset object
"""
ds = tf.data.Dataset.from_tensor_slices(data)
map_fn = lambda x, y: (cifar_process(x, is_train), y)
ds = ds.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds
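# --- Illustrative sketch (not part of the original module) ------------------
# A hedged, minimal tf.data pipeline in the spirit of CIFAR.create_ds above,
# with a trivial normalization map standing in for cifar_process so the demo
# stays self-contained. Shapes and batch size are arbitrary demo values.
def _demo_create_ds():
  images = np.random.randint(0, 256, size=(8, 32, 32, 3)).astype(np.uint8)
  labels = np.arange(8, dtype=np.int32)
  ds = tf.data.Dataset.from_tensor_slices((images, labels))
  # Stand-in for cifar_process: cast to float and scale to [0, 1].
  ds = ds.map(
      lambda x, y: (tf.cast(x, tf.float32) / 255.0, y),
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  ds = ds.batch(4)
  return ds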
class WebVision(object):
"""Webvision dataset class."""
def __init__(self, root, version='webvisionmini', use_imagenet_as_eval=False):
self.version = version
self.num_classes = 50 if 'mini' in version else 1000
self.root = root
self.image_size = 224
self.use_imagenet_as_eval = use_imagenet_as_eval
default_n_per_class = 10
if '_' in FLAGS.dataset:
self.probe_size = int(FLAGS.dataset.split('_')[1]) * self.num_classes
else:
# Uses default ones, assume there is a dataset saved
self.probe_size = default_n_per_class * self.num_classes
self.probe_folder = 'probe_' + str(self.probe_size)
def wrapper_map_probe_v2(self, tfrecord):
"""tf.data.Dataset map function for probe data v2.
Args:
      tfrecord: a serialized example read from a tf.data.TFRecordDataset.
    Returns:
      A (preprocessed image, label) tuple.
"""
def _extract_fn(tfrecord):
"""Extracts the functions."""
features = {
'image/encoded': tf.FixedLenFeature([], tf.string),
'image/label': tf.FixedLenFeature([], tf.int64)
}
example = tf.parse_single_example(tfrecord, features)
image, label = example['image/encoded'], tf.cast(
example['image/label'], dtype=tf.int32)
return [image, label]
image_bytes, label = _extract_fn(tfrecord)
label = tf.cast(label, tf.int64)
image = imagenet_preprocess_image(
image_bytes, is_training=True, image_size=self.image_size)
return image, label
def wrapper_map_v2(self, train):
"""tf.data.Dataset map function for train data v2."""
def _func(data):
img, label = data['image'], data['label']
image_bytes = tf.image.encode_jpeg(img)
image_1 = imagenet_preprocess_image(
image_bytes, is_training=train, image_size=self.image_size)
if train:
image_2 = imagenet_preprocess_image(
image_bytes,
is_training=train,
image_size=self.image_size,
autoaugment_name='v0',
use_cutout=True)
images = tf.concat(
[tf.expand_dims(image_1, 0),
tf.expand_dims(image_2, 0)], axis=0)
else:
images = image_1
return images, label
return _func
def create_loader(self):
"""Creates loader."""
if self.use_imagenet_as_eval:
# To evaluate on webvision eval, set this to False.
split = ['train']
val_ds, imagenet_info = tfds.load(
name='imagenet2012',
download=True,
split='validation',
data_dir=self.root,
with_info=True)
val_info = imagenet_info.splits['validation']
tf.logging.info('WebVision: use imagenet validation')
else:
split = ['train', 'val']
assert tfds.__version__.startswith('2.'),\
'tensorflow_dataset version must be 2.x.x to use image_label_folder.'
ds, self.info = tfds.load(
'image_label_folder',
split=split,
data_dir=self.root,
builder_kwargs=dict(dataset_name=self.version),
with_info=True)
train_info = self.info.splits['train']
if len(split) == 2:
train_ds, val_ds = ds
val_info = self.info.splits['val']
else:
train_ds = ds[0]
self.train_dataset_size = train_info.num_examples
self.val_dataset_size = val_info.num_examples
self.test_dataset_size = self.val_dataset_size
train_ds = train_ds.map(
self.wrapper_map_v2(True),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
val_ds = val_ds.map(
self.wrapper_map_v2(False),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.train_dataflow = train_ds
self.val_dataflow = val_ds
def _get_probe():
"""Create probe data tf.data.Dataset."""
probe_ds = tf.data.TFRecordDataset(
os.path.join(self.root, self.version, self.probe_folder,
'imagenet2012-probe.tfrecord-1-of-1'))
probe_ds = probe_ds.map(
self.wrapper_map_probe_v2,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# For single file, we need to disable auto_shard_policy for multi-workers,
# e.g. every worker takes the same file
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.OFF)
probe_ds = probe_ds.with_options(options)
return probe_ds
self.probe_dataflow = _get_probe()
tf.logging.info(self.info)
tf.logging.info('[{}] Create {} \n train {} probe {} val {}'.format(
self.version, FLAGS.dataset, self.train_dataset_size,
self.probe_size, self.val_dataset_size))
return self
| apache-2.0 |
richardtran415/pymatgen | pymatgen/analysis/magnetism/tests/test_heisenberg.py | 5 | 2735 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
import pandas as pd
from pymatgen.core.structure import Structure
from pymatgen.analysis.magnetism.heisenberg import HeisenbergMapper
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "magnetic_orderings")
class HeisenbergMapperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.df = pd.read_json(os.path.join(test_dir, "mag_orderings_test_cases.json"))
# Good tests
cls.Mn3Al = pd.read_json(os.path.join(test_dir, "Mn3Al.json"))
cls.compounds = [cls.Mn3Al]
cls.hms = []
for c in cls.compounds:
ordered_structures = list(c["structure"])
ordered_structures = [Structure.from_dict(d) for d in ordered_structures]
epa = list(c["energy_per_atom"])
energies = [e * len(s) for (e, s) in zip(epa, ordered_structures)]
hm = HeisenbergMapper(ordered_structures, energies, cutoff=5.0, tol=0.02)
cls.hms.append(hm)
def setUp(self):
pass
def tearDown(self):
warnings.simplefilter("default")
def test_graphs(self):
for hm in self.hms:
sgraphs = hm.sgraphs
self.assertEqual(len(sgraphs), 7)
def test_sites(self):
for hm in self.hms:
unique_site_ids = hm.unique_site_ids
self.assertEqual(unique_site_ids[(0, 1)], 0)
def test_nn_interactions(self):
for hm in self.hms:
num_interacts = len(hm.nn_interactions)
self.assertEqual(num_interacts, 3)
dists = hm.dists
self.assertEqual(dists["nn"], 2.51)
def test_exchange_params(self):
for hm in self.hms:
ex_params = hm.get_exchange()
J_nn = round(18.052116895702852, 3)
self.assertEqual(round(ex_params["0-1-nn"], 3), J_nn)
def test_mean_field(self):
for hm in self.hms:
j_avg = hm.estimate_exchange()
value = round(52.54997149705518, 3)
self.assertEqual(round(j_avg, 3), value)
mft_t = hm.get_mft_temperature(j_avg)
value = round(292.90252668100584)
self.assertEqual(round(mft_t), value)
def test_get_igraph(self):
for hm in self.hms:
igraph = hm.get_interaction_graph()
self.assertEqual(len(igraph), 6)
def test_heisenberg_model(self):
for hm in self.hms:
hmodel = hm.get_heisenberg_model()
self.assertEqual(hmodel.formula, "Mn3Al")
if __name__ == "__main__":
unittest.main()
| mit |
jonathandunn/c2xg | c2xg/c2xg.py | 1 | 34387 | import os
import random
import numpy as np
import pandas as pd
import copy
import operator
import pickle
import codecs
from collections import defaultdict
import multiprocessing as mp
import cytoolz as ct
from functools import partial
from pathlib import Path
from cleantext import clean
try :
from .modules.Encoder import Encoder
from .modules.Loader import Loader
from .modules.Parser import Parser
from .modules.Association import Association
from .modules.Candidates import Candidates
from .modules.MDL_Learner import MDL_Learner
from .modules.Parser import parse_examples
except :
from modules.Encoder import Encoder
from modules.Loader import Loader
from modules.Parser import Parser
from modules.Association import Association
from modules.Candidates import Candidates
from modules.MDL_Learner import MDL_Learner
from modules.Parser import parse_examples
#------------------------------------------------------------
def eval_mdl(files, workers, candidates, Load, Encode, Parse, freq_threshold = -1, report = False):
print("Now initiating MDL evaluation: " + str(files))
for file in files:
print("\tStarting " + str(file))
MDL = MDL_Learner(Load, Encode, Parse, freq_threshold = freq_threshold, vectors = {"na": 0}, candidates = candidates)
MDL.get_mdl_data([file], workers = workers, learn_flag = False)
total_mdl, l1_cost, l2_match_cost, l2_regret_cost, baseline_mdl = MDL.evaluate_subset(subset = False, return_detail = True)
if report == True:
return total_mdl, l1_cost, l2_match_cost, l2_regret_cost, baseline_mdl
#------------------------------------------------------------
def delta_grid_search(candidate_file, test_file, workers, mdl_workers, association_dict, freq_threshold, language, in_dir, out_dir, s3, s3_bucket, max_words, nickname = "current"):
print("\nStarting grid search for beam search settings.")
result_dict = {}
delta_thresholds = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
if len(delta_thresholds) < workers:
parse_workers = len(delta_thresholds)
else:
parse_workers = workers
#Multi-process#
pool_instance = mp.Pool(processes = parse_workers, maxtasksperchild = 1)
distribute_list = [(x, candidate_file) for x in delta_thresholds]
pool_instance.map(partial(process_candidates,
association_dict = association_dict.copy(),
language = language,
freq_threshold = freq_threshold,
in_dir = in_dir,
out_dir = out_dir,
s3 = s3,
s3_bucket = s3_bucket,
max_words = max_words,
nickname = nickname
), distribute_list, chunksize = 1)
pool_instance.close()
pool_instance.join()
#Now MDL
if language == "zho":
zho_split = True
else:
zho_split = False
Load = Loader(in_dir, out_dir, language, s3, s3_bucket, max_words = max_words)
Encode = Encoder(Loader = Load, zho_split = zho_split)
Parse = Parser(Load, Encode)
for threshold in delta_thresholds:
print("\tStarting MDL search for " + str(threshold))
filename = str(candidate_file) + "." + nickname + ".delta." + str(threshold) + ".p"
candidates = Load.load_file(filename)
if len(candidates) < 5:
print("\tNot enough candidates!")
else:
mdl_score = eval_mdl(files = test_file,
candidates = candidates,
workers = mdl_workers,
Load = Load,
Encode = Encode,
Parse = Parse,
freq_threshold = freq_threshold,
report = True
)
result_dict[threshold] = mdl_score
print("\tThreshold: " + str(threshold) + " and MDL: " + str(mdl_score))
#Get threshold with best score
print(result_dict)
best = min(result_dict.items(), key=operator.itemgetter(1))[0]
#Get best candidates
filename = str(candidate_file) + "." + nickname + ".delta." + str(best) + ".p"
best_candidates = Load.load_file(filename)
return best, best_candidates
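#------------------------------------------------------------
#Illustrative sketch (not part of the original package): a hedged mini-version
#of the selection step at the end of delta_grid_search, where each candidate
#threshold is scored and the one with the lowest score wins (lower MDL is
#better). The quadratic toy score below is invented for the demo.
def _demo_pick_best_threshold():
    thresholds = [0.01, 0.02, 0.03, 0.04, 0.05]
    score = lambda t: (t - 0.03) ** 2    #toy stand-in for an MDL score
    result_dict = {t: score(t) for t in thresholds}
    best = min(result_dict.items(), key=operator.itemgetter(1))[0]
    return best    #0.03 for this toy score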
#------------------------------------------------------------
def process_candidates(input_tuple, association_dict, language, in_dir, out_dir, s3, s3_bucket, freq_threshold = 1, mode = "", max_words = False, nickname = "current"):
threshold = input_tuple[0]
candidate_file = input_tuple[1]
print("\tStarting " + str(threshold) + " with freq threshold: " + str(freq_threshold))
Load = Loader(in_dir, out_dir, language, s3, s3_bucket, max_words)
C = Candidates(language = language, Loader = Load, association_dict = association_dict)
if mode == "candidates":
filename = str(candidate_file + ".candidates.p")
else:
filename = str(candidate_file) + "." + nickname + ".delta." + str(threshold) + ".p"
if filename not in Load.list_output():
candidates = C.process_file(candidate_file, threshold, freq_threshold, save = False)
Load.save_file(candidates, filename)
#Clean
del association_dict
del C
return
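#------------------------------------------------------------
#Illustrative sketch (not part of the original package): a hedged sketch of the
#multiprocessing pattern used above and in C2xG.learn, where the fixed keyword
#arguments are bound with functools.partial and the variable work items are
#distributed with Pool.map. The toy worker below stands in for
#process_candidates.
def _demo_toy_worker(threshold, offset):
    #toy stand-in for process_candidates: just combines its two arguments
    return threshold + offset
def _demo_pool_partial(workers = 2):
    work_items = [0.01, 0.02, 0.03]
    pool_instance = mp.Pool(processes = workers, maxtasksperchild = 1)
    results = pool_instance.map(partial(_demo_toy_worker, offset = 1.0),
                                work_items, chunksize = 1)
    pool_instance.close()
    pool_instance.join()
    return results    #[1.01, 1.02, 1.03]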
#-------------------------------------------------------------------------------
class C2xG(object):
def __init__(self, data_dir = None, language = "eng", nickname = "", model = "", smoothing = False, zho_split = False, max_words = False, fast_parse = True):
#Initialize
self.nickname = nickname
if nickname != "":
print("Current nickname: " + nickname)
if data_dir != None:
in_dir = os.path.join(data_dir, "IN")
out_dir = os.path.join(data_dir, "OUT")
else:
in_dir = None
out_dir = None
self.data_dir = data_dir
#Assumed defaults: S3 storage is not configured here (these attributes are referenced later by learn())
self.s3 = False
self.s3_bucket = None
self.language = language
self.zho_split = zho_split
self.Load = Loader(in_dir, out_dir, language = self.language, max_words = max_words)
self.Encode = Encoder(Loader = self.Load, zho_split = self.zho_split)
self.Association = Association(Loader = self.Load, nickname = self.nickname)
self.Candidates = Candidates(language = self.language, Loader = self.Load)
self.Parse = Parser(self.Load, self.Encode)
self.in_dir = in_dir
self.out_dir = out_dir
self.max_words = max_words
self.smoothing = smoothing
#Try to load default or specified model
if model == "":
model = self.language + ".Grammar.v3.p"
#Try to load grammar from file
if isinstance(model, str):
try:
modelname = None
if os.path.isfile( model ) :
modelname = model
else :
modelname = Path(__file__).parent / os.path.join("data", "models", model)
with open(modelname, "rb") as handle:
self.model = pickle.load(handle)
except Exception as e:
print("No model exists, loading empty model.")
self.model = None
#Take model as input
elif isinstance(model, list):
self.model = model
if fast_parse :
self._detail_model() ## self.detailed_model set by this.
else :
self.detailed_model = None
#self.n_features = len(self.model)
self.Encode.build_decoder()
#------------------------------------------------------------------
def _detail_model(self) :
## Update model so we can access grammar faster ...
## Want to make `if construction[0][1] == unit[construction[0][0]-1]` faster
## Dict on construction[0][1] which is self.model[i][0][1] (Call this Y)
## BUT unit[ construction[0][0] - 1 ] changes with unit ...
## construction[0][0] values are very limited. (call this X)
## dict[ construction[0][0] ][ construction[0][1] ] = list of constructions
model_expanded = dict()
X = list( set( [ self.model[i][0][0] for i in range(len(self.model)) ] ) )
for x in X :
model_expanded[ x ] = defaultdict( list )
this_x_elems = list()
for k, elem in enumerate( self.model ) :
if elem[0][0] != x :
continue
elem_trunc = [ i for i in elem if i != (0,0) ]
model_expanded[ x ][ elem[0][1] ].append( ( elem, elem_trunc, k ) )
self.detailed_model = ( X, model_expanded )
#------------------------------------------------------------------
def parse_return(self, input, mode = "files", workers = None):
#Compatbility with idNet
if mode == "idNet":
mode = "lines"
#Make sure grammar is loaded
if self.model == None:
print("Unable to parse: No grammar model provided.")
sys.exit()
#Accepts str of filename or list of strs of filenames
if isinstance(input, str):
input = [input]
#Text as input
if mode == "lines":
lines = self.Parse.parse_idNet(input, self.model, workers, self.detailed_model )
return np.array(lines)
#Filenames as input
elif mode == "files":
features = self.Parse.parse_batch(input, self.model, workers, self.detailed_model )
return np.array(features)
#-------------------------------------------------------------------------------
def parse_validate(self, input, workers = 1):
self.Parse.parse_validate(input, grammar = self.model, workers = workers, detailed_grammar = self.detailed_model)
#-------------------------------------------------------------------------------
def parse_yield(self, input, mode = "files"):
#Make sure grammar is loaded
if self.model == None:
print("Unable to parse: No grammar model provided.")
sys.exit()
#Accepts str of filename or list of strs in batch/stream modes
if isinstance(input, str):
input = [input]
#Filenames as input
if mode == "files":
for features in self.Parse.parse_stream(input, self.model, detailed_grammar = self.detailed_model):
yield np.array(features)
#Texts as input
elif mode == "lines":
for line in input:
line = self.Parse.parse_line_yield(line, self.model, detailed_grammar = self.detailed_model)
yield np.array(line)
#-------------------------------------------------------------------------------
def print_constructions(self):
return_list = []
for i in range(len(self.model)):
x = self.model[i]
printed_examples = []
#Prune to actual constraints
x = [y for y in x if y[0] != 0]
length = len(x)
construction = self.Encode.decode_construction(x)
print(i, construction)
return_list.append(str(i) + ": " + str(construction))
return return_list
#-------------------------------------------------------------------------------
def print_examples(self, input_file, output_file, n):
#Read and write in the default data directories
output_file = os.path.join(self.out_dir, output_file)
#Save the pre-processed lines, to save time later
line_list = []
for line, encoding in self.Encode.load_examples(input_file):
line_list.append([line, encoding])
with codecs.open(output_file, "w", encoding = "utf-8") as fw:
for i in range(len(self.model)):
x = self.model[i]
printed_examples = []
#Prune to actual constraints
x = [y for y in x if y[0] != 0]
length = len(x)
construction = self.Encode.decode_construction(x)
print(i, construction)
fw.write(str(i) + "\t")
fw.write(construction)
fw.write("\n")
#Track how many examples have been found
counter = 0
for line, encoding in line_list:
construction_thing, indexes, matches = parse_examples(x, encoding)
if matches > 0:
for index in indexes:
text = line.split()[index:index+length]
if text not in printed_examples:
counter += 1
printed_examples.append(text)
fw.write("\t" + str(counter) + "\t" + str(text) + "\n")
#Stop looking for examples at threshold
if counter > n:
break
#End of examples for this construction
fw.write("\n\n")
#-------------------------------------------------------------------------------
def get_association(self, input_data, freq_threshold = 1, smoothing = False, lex_only = False):
#Load from file if necessary
if isinstance(input_data, str):
input_data = [x for x in self.Load.read_file(input_data)]
ngrams = self.Association.find_ngrams(input_data, workers = 1, save = False, lex_only = lex_only)
ngrams = self.Association.merge_ngrams(input_data, ngram_dict = ngrams, n_gram_threshold = freq_threshold)
association_dict = self.Association.calculate_association(ngrams = ngrams, smoothing = smoothing, save = False)
#Reduce to bigrams
keepable = lambda x: len(x) > 1
all_ngrams = ct.keyfilter(keepable, association_dict)
#Convert to readable CSV
pairs = []
for pair in association_dict.keys():
try:
val1 = self.Encode.decoding_dict[pair[0][0]][pair[0][1]]
except Exception as e:
val1 = "UNK"
try:
val2 = self.Encode.decoding_dict[pair[1][0]][pair[1][1]]
except Exception as e:
val2 = "UNK"
if val1 != "UNK" and val2 != "UNK":
maximum = max(association_dict[pair]["LR"], association_dict[pair]["RL"])
pairs.append([val1, val2, maximum, association_dict[pair]["LR"], association_dict[pair]["RL"], association_dict[pair]["Freq"]])
#Make dataframe
df = pd.DataFrame(pairs, columns = ["Word1", "Word2", "Max", "LR", "RL", "Freq"])
df = df.sort_values("Max", ascending = False)
return df
#-------------------------------------------------------------------------------
def get_lexicon(self, file):
if self.data_dir == None:
print("Error: Cannot train lexicons without specified data directory.")
sys.exit()
vocab = []
for line in self.Load.read_file(file):
#Use clean-text
line = clean(line,
fix_unicode = True,
to_ascii = False,
lower = True,
no_line_breaks = True,
no_urls = True,
no_emails = True,
no_phone_numbers = True,
no_numbers = True,
no_digits = True,
no_currency_symbols = True,
no_punct = True,
replace_with_punct = "",
replace_with_url = "<URL>",
replace_with_email = "<EMAIL>",
replace_with_phone_number = "<PHONE>",
replace_with_number = "<NUMBER>",
replace_with_digit = "0",
replace_with_currency_symbol = "<CUR>"
)
line = line.split()
vocab += line
return set(vocab)
#-------------------------------------------------------------------------------
def learn(self,
nickname,
cycles = 1,
cycle_size = (1, 5, 20),
freq_threshold = 10,
beam_freq_threshold = 10,
turn_limit = 10,
workers = 1,
mdl_workers = 1,
states = None,
fixed_set = [],
beam_threshold = None,
no_mdl = False,
):
self.nickname = nickname
if self.data_dir == None:
print("Error: Cannot train grammars without specified data directory.")
sys.exit()
#Check learning state and resume
self.model_state_file = self.language + "." + self.nickname + ".State.p"
try:
loader_files = self.Load.list_output()
except:
loader_files = []
if self.model_state_file in loader_files:
print("Resuming learning state.")
self.progress_dict, self.data_dict = self.Load.load_file(self.model_state_file)
if states != None:
print("Manual state change!")
for state in states:
self.progress_dict[state[0]][state[1]] = state[2]
else:
print("Initializing learning state.")
self.data_dict = self.divide_data(cycles, cycle_size, fixed_set)
self.progress_dict = self.set_progress()
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#Check beam setting
if beam_threshold != None:
self.progress_dict["BeamSearch"] = beam_threshold
#Learn each cycle
for cycle in self.progress_dict.keys():
if isinstance(cycle, int):
if self.progress_dict[cycle]["State"] == "Complete":
print("\t Cycle " + str(cycle) + " already complete.")
#This cycle is not yet finished
else:
#-----------------#
#BACKGROUND STAGE
#-----------------#
if self.progress_dict[cycle]["Background_State"] != "Complete":
#Check if ngram extraction is finished
if self.progress_dict[cycle]["Background_State"] == "None":
check_files = self.Load.list_output(type = "ngrams")
pop_list = []
for i in range(len(self.progress_dict[cycle]["Background"])):
if self.progress_dict[cycle]["Background"][i] + "." + self.nickname + ".ngrams.p" in check_files:
pop_list.append(i)
#Pop items separately in reverse order
if len(pop_list) > 0:
for i in sorted(pop_list, reverse = True):
self.progress_dict[cycle]["Background"].pop(i)
#If remaining background files, process them
if len(self.progress_dict[cycle]["Background"]) > 0:
print("\tNow processing remaining files: " + str(len(self.progress_dict[cycle]["Background"])))
self.Association.find_ngrams(self.progress_dict[cycle]["Background"], workers)
#Change state
self.progress_dict[cycle]["Background_State"] = "Ngrams"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#Check if ngram merging is finished
if self.progress_dict[cycle]["Background_State"] == "Ngrams":
files = [filename + "." + self.nickname + ".ngrams.p" for filename in self.data_dict[cycle]["Background"]]
print("\tNow merging ngrams for files: " + str(len(files)))
ngrams = self.Association.merge_ngrams(files, freq_threshold)
#Save data and state
self.Load.save_file(ngrams, nickname + ".Cycle-" + str(cycle) + ".Merged-Grams.p")
self.progress_dict[cycle]["Background_State"] = "Merged"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#Check if association_dict has been made
if self.progress_dict[cycle]["Background_State"] == "Merged":
ngrams = self.Load.load_file(nickname + ".Cycle-" + str(cycle) + ".Merged-Grams.p")
association_dict = self.Association.calculate_association(ngrams = ngrams, smoothing = self.smoothing, save = False)
del ngrams
self.Load.save_file(association_dict, nickname + ".Cycle-" + str(cycle) + ".Association_Dict.p")
self.progress_dict[cycle]["Background_State"] = "Complete"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
self.association_dict = association_dict
else:
print("\tLoading association_dict.")
self.association_dict = self.Load.load_file(nickname + ".Cycle-" + str(cycle) + ".Association_Dict.p")
#-----------------#
#CANDIDATE STAGE
#-----------------#
if self.progress_dict[cycle]["Candidate_State"] != "Complete":
print("Initializing Candidates module")
C = Candidates(self.language, self.Load, workers, self.association_dict)
#Find beam search threshold
if self.progress_dict["BeamSearch"] == "None" or self.progress_dict["BeamSearch"] == {}:
print("Finding Beam Search settings.")
delta_threshold, best_candidates = delta_grid_search(candidate_file = self.data_dict["BeamCandidates"],
test_file = self.data_dict["BeamTest"],
workers = workers,
mdl_workers = mdl_workers,
association_dict = self.association_dict,
freq_threshold = beam_freq_threshold,
language = self.language,
in_dir = self.in_dir,
out_dir = self.out_dir,
s3 = self.s3,
s3_bucket = self.s3_bucket,
nickname = self.nickname,
max_words = self.max_words,
)
self.progress_dict["BeamSearch"] = delta_threshold
self.progress_dict[cycle]["Candidate_State"] = "Threshold"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#If saved, load beam search threshold
else:
print("Loading Beam Search settings.")
delta_threshold = self.progress_dict["BeamSearch"]
self.progress_dict[cycle]["Candidate_State"] = "Threshold"
#For a fixed set experiment, we use the same data so we keep the best candidates
if fixed_set == []:
#Check which files have been completed
if self.progress_dict[cycle]["Candidate_State"] == "Threshold":
check_files = self.Load.list_output(type = "candidates")
pop_list = []
for i in range(len(self.progress_dict[cycle]["Candidate"])):
if self.progress_dict[cycle]["Candidate"][i] + ".candidates.p" in check_files:
pop_list.append(i)
#Pop items separately in reverse order
if len(pop_list) > 0:
for i in sorted(pop_list, reverse = True):
self.progress_dict[cycle]["Candidate"].pop(i)
#If remaining candidate files, process them
if len(self.progress_dict[cycle]["Candidate"]) > 0:
print("\n\tNow processing remaining files: " + str(len(self.progress_dict[cycle]["Candidate"])))
#Multi-process#
if workers > len(self.progress_dict[cycle]["Candidate"]):
candidate_workers = len(self.progress_dict[cycle]["Candidate"])
else:
candidate_workers = workers
pool_instance = mp.Pool(processes = candidate_workers, maxtasksperchild = 1)
distribute_list = [(delta_threshold, x) for x in self.progress_dict[cycle]["Candidate"]]
pool_instance.map(partial(process_candidates,
association_dict = self.association_dict.copy(),
language = self.language,
in_dir = self.in_dir,
out_dir = self.out_dir,
s3 = self.s3,
s3_bucket = self.s3_bucket,
mode = "candidates",
max_words = self.max_words,
), distribute_list, chunksize = 1)
pool_instance.close()
pool_instance.join()
self.progress_dict[cycle]["Candidate_State"] = "Merge"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#Merge and Save candidates
if self.progress_dict[cycle]["Candidate_State"] == "Merge":
output_files = [filename + ".candidates.p" for filename in self.data_dict[cycle]["Candidate"]]
candidates = self.Candidates.merge_candidates(output_files, freq_threshold)
self.Load.save_file(candidates, nickname + ".Cycle-" + str(cycle) + ".Candidates.p")
self.progress_dict[cycle]["Candidate_State"] = "Dict"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#Make association vectors
if self.progress_dict[cycle]["Candidate_State"] == "Dict":
candidates = self.Load.load_file(nickname + ".Cycle-" + str(cycle) + ".Candidates.p")
candidate_dict = self.Candidates.get_association(candidates, self.association_dict)
self.Load.save_file(candidate_dict, nickname + ".Cycle-" + str(cycle) + ".Candidate_Dict.p")
self.progress_dict[cycle]["Candidate_State"] == "Complete"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
else:
print("\tLoading candidate_dict.")
candidate_dict = self.Load.load_file(nickname + ".Cycle-" + str(cycle) + ".Candidate_Dict.p")
candidates = self.Load.load_file(nickname + ".Cycle-" + str(cycle) + ".Candidates.p")
del self.association_dict
#If there was a fixed set of training/testing files
elif fixed_set != []:
candidates = best_candidates
candidate_dict = self.Candidates.get_association(candidates, self.association_dict)
del self.association_dict
self.progress_dict[cycle]["Candidate_State"] == "Complete"
#-----------------#
#MDL STAGE
#-----------------#
if no_mdl == False:
if self.progress_dict[cycle]["MDL_State"] != "Complete":
#Prep test data for MDL
if self.progress_dict[cycle]["MDL_State"] == "None":
MDL = MDL_Learner(self.Load, self.Encode, self.Parse, freq_threshold = 1, vectors = candidate_dict, candidates = candidates)
MDL.get_mdl_data(self.progress_dict[cycle]["Test"], workers = mdl_workers)
self.Load.save_file(MDL, nickname + ".Cycle-" + str(cycle) + ".MDL.p")
self.progress_dict[cycle]["MDL_State"] = "EM"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#Run EM-based Tabu Search
if self.progress_dict[cycle]["MDL_State"] == "EM":
try:
MDL.search_em(turn_limit, mdl_workers)
except:
MDL = self.Load.load_file(nickname + ".Cycle-" + str(cycle) + ".MDL.p")
MDL.search_em(turn_limit, mdl_workers)
self.Load.save_file(MDL, nickname + ".Cycle-" + str(cycle) + ".MDL.p")
self.progress_dict[cycle]["MDL_State"] = "Direct"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
#Run direct Tabu Search
if self.progress_dict[cycle]["MDL_State"] == "Direct":
try:
MDL.search_direct(turn_limit*3, mdl_workers)
except:
MDL = self.Load.load_file(nickname + ".Cycle-" + str(cycle) + ".MDL.p")
MDL.search_direct(turn_limit*3, mdl_workers)
#Get grammar to save
grammar_dict = defaultdict(dict)
for i in range(len(MDL.candidates)):
grammar_dict[i]["Constructions"] = MDL.candidates[i]
grammar_dict[i]["Matches"] = MDL.matches[i]
#Save grammar
self.Load.save_file(grammar_dict, nickname + ".Cycle-" + str(cycle) + ".Final_Grammar.p")
self.progress_dict[cycle]["MDL_State"] = "Complete"
self.progress_dict[cycle]["State"] = "Complete"
self.Load.save_file((self.progress_dict, self.data_dict), self.model_state_file)
del MDL
elif no_mdl == True:
print("Calculating MDL")
self.progress_dict[cycle]["MDL_State"] = "Complete"
self.progress_dict[cycle]["State"] = "Complete"
#-----------------#
#MERGING STAGE
#-----------------#
if self.progress_dict[cycle]["State"] == "Complete":
if no_mdl == False:
print("Starting to merge fold grammars.")
grammar_files = [nickname + ".Cycle-" + str(i) + ".Final_Grammar.p" for i in range(cycles)]
final_grammar = self.merge_grammars(grammar_files)
self.Load.save_file(final_grammar, self.language + ".Grammar.p")
else:
final_grammar = list(candidates.keys())
self.Load.save_file(final_grammar, self.nickname + ".Grammar_BeamOnly.p")
#-------------------------------------------------------------------------------
def merge_grammars(self, grammar_files, no_mdl = False):
all_grammars = {}
if no_mdl == False:
#Load all grammar files
for file in grammar_files:
current_dict = self.Load.load_file(file)
#Iterate over constructions in current fold grammar
for key in current_dict.keys():
current_construction = current_dict[key]["Constructions"]
current_construction = current_construction.tolist()
current_matches = current_dict[key]["Matches"]
#Reformat
new_construction = []
for unit in current_construction:
new_type = unit[0]
new_index = unit[1]
if new_type != 0:
new_construction.append(tuple((new_type, new_index)))
#Make hashable
new_construction = tuple(new_construction)
#Add to dictionary
if new_construction not in all_grammars:
all_grammars[new_construction] = {}
all_grammars[new_construction]["Matches"] = current_matches
all_grammars[new_construction]["Selected"] = 1
else:
all_grammars[new_construction]["Matches"] += current_matches
all_grammars[new_construction]["Selected"] += 1
#Done loading grammars
print("Final grammar for " + self.language + " contains " + str(len(list(all_grammars.keys()))))
final_grammar = list(all_grammars.keys())
final_grammar = self.Parse.format_grammar(final_grammar)
else:
final_grammar = []
for file in grammar_files:
current_dict = self.Load.load_file(file)
for key in current_dict:
if key not in final_grammar:
final_grammar.append(key)
return final_grammar
#-------------------------------------------------------------------------------
def divide_data(self, cycles, cycle_size, fixed_set = []):
data_dict = defaultdict(dict)
#For a fixed set experiment, we use the same data for all simulations
if fixed_set != []:
data_dict["BeamCandidates"] = fixed_set
data_dict["BeamTest"] = fixed_set
for cycle in range(cycles):
data_dict[cycle]["Test"] = fixed_set
data_dict[cycle]["Candidate"] = fixed_set
data_dict[cycle]["Background"] = fixed_set
#Otherwise we get unique data
else:
input_files = self.Load.list_input()
#Get number of files to use for each purpose
num_test_files = cycle_size[0]
num_candidate_files = cycle_size[1]
num_background_files = cycle_size[2]
num_cycle_files = cycle_size[0] + cycle_size[1] + cycle_size[2]
#Get Beam Search tuning files
candidate_i = random.randint(0, len(input_files))
candidate_file = input_files.pop(candidate_i)
test_i = random.randint(0, len(input_files))
test_file = input_files.pop(test_i)
#Get and divide input data
data_dict["BeamCandidates"] = candidate_file
data_dict["BeamTest"] = test_file
#Get unique data for each cycle
for cycle in range(cycles):
#Randomize remaining files
random.shuffle(input_files)
cycle_files = []
#Gather as many files as required
for segment in range(num_cycle_files):
current_file = input_files.pop()
cycle_files.append(current_file)
#Assign files as final MDL test data
random.shuffle(cycle_files)
test_files = []
for file in range(num_test_files):
current_file = cycle_files.pop()
test_files.append(current_file)
data_dict[cycle]["Test"] = test_files
#Assign files as candidate estimation data
random.shuffle(cycle_files)
candidate_files = []
for file in range(num_candidate_files):
current_file = cycle_files.pop()
candidate_files.append(current_file)
data_dict[cycle]["Candidate"] = candidate_files
#Assign files as candidate estimation data
random.shuffle(cycle_files)
background_files = []
for file in range(num_background_files):
current_file = cycle_files.pop()
background_files.append(current_file)
data_dict[cycle]["Background"] = background_files
return data_dict
#-------------------------------------------------------------------------------
def set_progress(self):
progress_dict = defaultdict(dict)
progress_dict["BeamSearch"] = "None"
for cycle in self.data_dict.keys():
if isinstance(cycle, int):
progress_dict[cycle]["State"] = "Incomplete"
progress_dict[cycle]["Background"] = self.data_dict[cycle]["Background"].copy()
progress_dict[cycle]["Background_State"] = "None"
progress_dict[cycle]["Candidate"] = self.data_dict[cycle]["Candidate"].copy()
progress_dict[cycle]["Candidate_State"] = "None"
progress_dict[cycle]["Test"] = self.data_dict[cycle]["Test"].copy()
progress_dict[cycle]["MDL_State"] = "None"
return progress_dict
#-----------------------------------------------
def fuzzy_jaccard(self, grammar1, grammar2, threshold = 0.70, workers = 2):
umbrella = set(grammar1 + grammar2)
#First grammar
pool_instance = mp.Pool(processes = workers, maxtasksperchild = None)
matches1 = pool_instance.map(partial(self.fuzzy_match, grammar = grammar1, threshold = threshold), umbrella, chunksize = 100)
pool_instance.close()
pool_instance.join()
#Second grammar
pool_instance = mp.Pool(processes = workers, maxtasksperchild = None)
matches2 = pool_instance.map(partial(self.fuzzy_match, grammar = grammar2, threshold = threshold), umbrella, chunksize = 100)
pool_instance.close()
pool_instance.join()
result = 1 - jaccard(matches1, matches2)
return result
#-----------------------------------------------
def fuzzy_match(self, construction, grammar, threshold = 0.70):
match = 0
#Check for exact match
if construction in grammar:
match = 1
#Or fall back to highest overlap
else:
for u_construction in grammar:
s = difflib.SequenceMatcher(None, construction, u_construction)
length = max(len(construction), len(u_construction))
overlap = sum([x[2] for x in s.get_matching_blocks()]) / float(length)
if overlap >= threshold:
match = 1
break
return match
#-----------------------------------------------
def get_mdl(self, candidates, file, workers = 2, freq_threshold = -1):
result = eval_mdl([file],
workers = workers,
candidates = candidates,
Load = self.Load,
Encode = self.Encode,
Parse = self.Parse,
freq_threshold = freq_threshold,
report = True
)
return result
#-----------------------------------------------
def step_data(self, data, step):
return_data = []
extra_data = []
counter = 0
for line in data:
if len(line) > 5:
if counter < step:
return_data.append(line)
counter += len(line.split())
else:
extra_data.append(line)
return return_data, extra_data
#-----------------------------------------------
def forget_constructions(self, grammar, datasets, workers = None, threshold = 1, adjustment = 0.25, increment_size = 100000):
round = 0
weights = [1 for x in range(len(grammar))]
for i in range(20):
print(round, len(grammar))
round += 1
for i in range(len(datasets)):
dataset = datasets[i]
data_parse, data_keep = self.step_data(dataset, increment_size)
datasets[i] = data_keep
if len(dataset) > 25:
self.model = grammar
self._detail_model()
vector = np.array(self.parse_return(data_parse, mode = "lines"))
vector = np.sum(vector, axis = 0)
weights = [1 if vector[i] > threshold else weights[i]-adjustment for i in range(len(weights))]
grammar = [grammar[i] for i in range(len(grammar)) if weights[i] >= 0.0001]
weights = [weights[i] for i in range(len(weights)) if weights[i] >= 0.0001]
return grammar
#----------------------------------------------- | gpl-3.0 |
larsmans/scikit-learn | sklearn/svm/tests/test_bounds.py | 42 | 2112 | import nose
from nose.tools import assert_true
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['l2', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'l2': LinearSVC(loss='l2', penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
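# Illustrative sketch (not part of the original test module): a hedged example
# of what l1_min_c computes -- the threshold below which an l1-penalized model
# stays entirely null, so a C slightly above it yields at least one non-zero
# parameter. This mirrors check_l1_min_c above with default intercept settings.
def demo_l1_min_c():
    min_c = l1_min_c(dense_X, Y1, 'log')
    clf = LogisticRegression(penalty='l1', C=min_c * 1.01)
    clf.fit(dense_X, Y1)
    assert_true((np.asarray(clf.coef_) != 0).any() or
                (np.asarray(clf.intercept_) != 0).any())
    return min_c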
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
cheral/orange3 | Orange/data/io.py | 1 | 36192 | import contextlib
import csv
import locale
import pickle
import re
import subprocess
import sys
import warnings
from ast import literal_eval
from collections import OrderedDict, Counter
from functools import lru_cache
from itertools import chain, repeat
from math import isnan
from numbers import Number
from os import path, unlink
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse, urlsplit, urlunsplit, unquote as urlunquote
from urllib.request import urlopen, Request
import bottleneck as bn
import numpy as np
from chardet.universaldetector import UniversalDetector
from Orange.data import (
_io, is_discrete_values, MISSING_VALUES, Table, Domain, Variable,
DiscreteVariable, StringVariable, ContinuousVariable, TimeVariable,
)
from Orange.util import Registry, flatten, namegen
__all__ = ["Flags", "FileFormat"]
_IDENTITY = lambda i: i
class Compression:
"""Supported compression extensions"""
GZIP = '.gz'
BZIP2 = '.bz2'
XZ = '.xz'
all = (GZIP, BZIP2, XZ)
def open_compressed(filename, *args, _open=open, **kwargs):
"""Return seamlessly decompressed open file handle for `filename`"""
if isinstance(filename, str):
if filename.endswith(Compression.GZIP):
from gzip import open as _open
elif filename.endswith(Compression.BZIP2):
from bz2 import open as _open
elif filename.endswith(Compression.XZ):
from lzma import open as _open
return _open(filename, *args, **kwargs)
# Else already a file, just pass it through
return filename
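# --- Illustrative sketch (not part of the original module) ---
# Hedged example of open_compressed: the same call transparently reads plain or
# compressed files, dispatching on the filename extension. The temporary .gz
# file below exists only for the demo.
def _demo_open_compressed():
    import gzip
    with NamedTemporaryFile(suffix='.csv.gz', delete=False) as tmp:
        name = tmp.name
    with gzip.open(name, 'wt', encoding='utf-8') as f:
        f.write('a,b\n1,2\n')
    with open_compressed(name, 'rt', encoding='utf-8') as f:
        content = f.read()
    unlink(name)
    return content  # 'a,b\n1,2\n'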
def detect_encoding(filename):
"""
Detect encoding of `filename`, which can be a ``str`` filename, a
``file``-like object, or ``bytes``.
"""
# Try with Unix file utility first because it's faster (~10ms vs 100ms)
if isinstance(filename, str) and not filename.endswith(Compression.all):
try:
with subprocess.Popen(('file', '--brief', '--mime-encoding', filename),
stdout=subprocess.PIPE) as process:
process.wait()
if process.returncode == 0:
encoding = process.stdout.read().strip()
# file only supports these encodings; for others it says
# unknown-8bit or binary. So we give chardet a chance to do
# better
if encoding in (b'utf-8', b'us-ascii', b'iso-8859-1',
b'utf-7', b'utf-16le', b'utf-16be', b'ebcdic'):
return encoding.decode('us-ascii')
except OSError: pass # windoze
# file not available or unable to guess the encoding, have chardet do it
detector = UniversalDetector()
    # We examine only the first 48 kB (twelve 4 kB blocks) of the file because chardet is really slow
MAX_BYTES = 4*1024*12
def _from_file(f):
detector.feed(f.read(MAX_BYTES))
detector.close()
return (detector.result.get('encoding')
if detector.result.get('confidence', 0) >= .85 else
'utf-8')
if isinstance(filename, str):
with open_compressed(filename, 'rb') as f:
return _from_file(f)
elif isinstance(filename, bytes):
detector.feed(filename[:MAX_BYTES])
detector.close()
return detector.result.get('encoding')
elif hasattr(filename, 'encoding'):
return filename.encoding
else: # assume file-like object that you can iter through
return _from_file(filename)
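# --- Illustrative sketch (not part of the original module) ---
# Hedged example of detect_encoding on raw bytes: the bytes branch above feeds
# chardet's UniversalDetector directly, so no file is needed. The exact guess
# and confidence depend on the chardet version installed.
def _demo_detect_encoding():
    text = 'pâté, œuf, smörgåsbord\n' * 50
    guessed = detect_encoding(text.encode('utf-8'))
    return guessed  # typically 'utf-8'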
def guess_data_type(orig_values):
"""
Use heuristics to guess data type.
"""
valuemap, values = [], orig_values
is_discrete = is_discrete_values(orig_values)
if is_discrete:
valuemap = sorted(is_discrete)
coltype = DiscreteVariable
else:
try:
values = [float(i) for i in orig_values]
except ValueError:
tvar = TimeVariable('_')
try:
values = [tvar.parse(i) for i in orig_values]
except ValueError:
coltype = StringVariable
else:
coltype = TimeVariable
else:
coltype = ContinuousVariable
return valuemap, values, coltype
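# --- Illustrative sketch (not part of the original module) ---
# Hedged examples of guess_data_type's heuristics: a small set of repeated
# string values is treated as discrete, parseable numbers as continuous, and
# ISO dates as time values. The toy columns are invented, and the exact
# discreteness thresholds may differ between Orange versions.
def _demo_guess_data_type():
    _, _, t1 = guess_data_type(['yes', 'no', 'yes', 'no'])
    _, _, t2 = guess_data_type(['1.5', '2', '3.25', '4'])
    _, _, t3 = guess_data_type(['2015-0%d-01' % i for i in range(1, 7)])
    return t1, t2, t3  # DiscreteVariable, ContinuousVariable, TimeVariable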
def sanitize_variable(valuemap, values, orig_values, coltype, coltype_kwargs,
domain_vars, existing_var, new_var_name, data=None):
if valuemap:
# Map discrete data to ints
def valuemap_index(val):
try:
return valuemap.index(val)
except ValueError:
return np.nan
values = np.vectorize(valuemap_index, otypes=[float])(orig_values)
coltype_kwargs.update(values=valuemap)
if coltype is StringVariable:
values = ['' if i is np.nan else i for i in orig_values]
var = None
if domain_vars is not None:
if existing_var:
# Use existing variable if available
var = coltype.make(existing_var.strip(), **coltype_kwargs)
else:
# Never use existing for un-named variables
var = coltype(new_var_name, **coltype_kwargs)
# Reorder discrete values to match existing variable
if var.is_discrete and not var.ordered:
new_order, old_order = var.values, coltype_kwargs.get('values',
var.values)
if new_order != old_order:
offset = len(new_order)
column = values if data.ndim > 1 else data
column += offset
for i, val in enumerate(var.values):
try:
oldval = old_order.index(val)
except ValueError:
continue
bn.replace(column, offset + oldval, new_order.index(val))
if isinstance(var, TimeVariable) or coltype is TimeVariable:
# Re-parse the values because only now after coltype.make call
# above, variable var is the correct one
_var = var if isinstance(var, TimeVariable) else TimeVariable('_')
values = [_var.parse(i) for i in orig_values]
return values, var
class Flags:
"""Parser for column flags (i.e. third header row)"""
DELIMITER = ' '
_RE_SPLIT = re.compile(r'(?<!\\)' + DELIMITER).split
_RE_ATTR_UNQUOTED_STR = re.compile(r'^[a-zA-Z_]').match
ALL = OrderedDict((
('class', 'c'),
('ignore', 'i'),
('meta', 'm'),
('weight', 'w'),
('.+?=.*?', ''), # general key=value attributes
))
_RE_ALL = re.compile(r'^({})$'.format('|'.join(filter(None, flatten(ALL.items())))))
def __init__(self, flags):
for v in filter(None, self.ALL.values()):
setattr(self, v, False)
self.attributes = {}
for flag in flags or []:
flag = flag.strip()
if self._RE_ALL.match(flag):
if '=' in flag:
k, v = flag.split('=', 1)
if not Flags._RE_ATTR_UNQUOTED_STR(v):
try:
v = literal_eval(v)
except SyntaxError:
# If parsing failed, treat value as string
pass
self.attributes[k] = v
else:
setattr(self, flag, True)
setattr(self, self.ALL.get(flag, ''), True)
elif flag:
warnings.warn('Invalid attribute flag \'{}\''.format(flag))
@staticmethod
def join(iterable, *args):
return Flags.DELIMITER.join(i.strip().replace(Flags.DELIMITER, '\\' + Flags.DELIMITER)
for i in chain(iterable, args)).lstrip()
@staticmethod
def split(s):
return [i.replace('\\' + Flags.DELIMITER, Flags.DELIMITER)
for i in Flags._RE_SPLIT(s)]
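# Illustrative sketch of Flags parsing as used for the third header row
# (the flag string below is made up for the example):
def _flags_example():
    f = Flags(Flags.split('class color=red'))
    # 'class' sets the shorthand attribute f.c to True, while the key=value
    # pair is collected into f.attributes -> {'color': 'red'}
    return f.c, f.attributes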
# Matches discrete specification where all the values are listed, space-separated
_RE_DISCRETE_LIST = re.compile(r'^\s*[^\s]+(\s[^\s]+)+\s*$')
_RE_TYPES = re.compile(r'^\s*({}|{}|)\s*$'.format(_RE_DISCRETE_LIST.pattern,
'|'.join(flatten(getattr(vartype, 'TYPE_HEADERS')
for vartype in Variable.registry.values()))))
_RE_FLAGS = re.compile(r'^\s*( |{}|)*\s*$'.format('|'.join(flatten(filter(None, i) for i in Flags.ALL.items()))))
class FileFormatMeta(Registry):
def __new__(cls, name, bases, attrs):
newcls = super().__new__(cls, name, bases, attrs)
# Optionally add compressed versions of extensions as supported
if getattr(newcls, 'SUPPORT_COMPRESSED', False):
new_extensions = list(getattr(newcls, 'EXTENSIONS', ()))
for compression in Compression.all:
for ext in newcls.EXTENSIONS:
new_extensions.append(ext + compression)
# OSX file dialog doesn't support filtering on double
# extensions (e.g. .csv.gz)
# https://bugreports.qt.io/browse/QTBUG-38303
# This is just here for OWFile that gets QFileDialog
# filters from FileFormat.readers.keys()
if sys.platform == 'darwin':
new_extensions.append(compression)
newcls.EXTENSIONS = tuple(new_extensions)
return newcls
@property
def formats(cls):
return cls.registry.values()
@lru_cache(5)
def _ext_to_attr_if_attr2(cls, attr, attr2):
"""
Return ``{ext: `attr`, ...}`` dict if ``cls`` has `attr2`.
If `attr` is '', return ``{ext: cls, ...}`` instead.
"""
return OrderedDict((ext, getattr(cls, attr, cls))
for cls in cls.registry.values()
if hasattr(cls, attr2)
for ext in getattr(cls, 'EXTENSIONS', []))
@property
def names(cls):
return cls._ext_to_attr_if_attr2('DESCRIPTION', '__class__')
@property
def writers(cls):
return cls._ext_to_attr_if_attr2('', 'write_file')
@property
def readers(cls):
return cls._ext_to_attr_if_attr2('', 'read')
@property
def img_writers(cls):
return cls._ext_to_attr_if_attr2('', 'write_image')
@property
def graph_writers(cls):
return cls._ext_to_attr_if_attr2('', 'write_graph')
class FileFormat(metaclass=FileFormatMeta):
"""
Subclasses set the following attributes and override the following methods:
EXTENSIONS = ('.ext1', '.ext2', ...)
DESCRIPTION = 'human-readable file format description'
SUPPORT_COMPRESSED = False
SUPPORT_SPARSE_DATA = False
def read(self):
... # load headers, data, ...
return self.data_table(data, headers)
@classmethod
def write_file(cls, filename, data):
...
self.write_headers(writer.write, data)
writer.writerows(data)
Wrapper FileFormat.data_table() returns Orange.data.Table from `data`
iterable (list (rows) of lists of values (cols)).
"""
PRIORITY = 10000 # Sort order in OWSave widget combo box, lower is better
def __init__(self, filename):
"""
Parameters
----------
filename : str
name of the file to open
"""
self.filename = filename
self.sheet = None
@property
def sheets(self):
"""FileFormats with a notion of sheets should override this property
to return a list of sheet names in the file.
Returns
-------
a list of sheet names
"""
return ()
def select_sheet(self, sheet):
"""Select sheet to be read
Parameters
----------
sheet : str
sheet name
"""
self.sheet = sheet
@classmethod
def get_reader(cls, filename):
"""Return reader instance that can be used to read the file
Parameters
----------
filename : str
Returns
-------
FileFormat
"""
for ext, reader in cls.readers.items():
# Skip ambiguous, invalid compression-only extensions added on OSX
if ext in Compression.all:
continue
if filename.endswith(ext):
return reader(filename)
raise IOError('No readers for file "{}"'.format(filename))
@classmethod
def write(cls, filename, data):
return cls.write_file(filename, data)
@classmethod
def write_table_metadata(cls, filename, data):
if isinstance(filename, str) and hasattr(data, 'attributes'):
if all(isinstance(key, str) and isinstance(value, str)
for key, value in data.attributes.items()):
with open(filename + '.metadata', 'w') as f:
f.write("\n".join("{}: {}".format(*kv)
for kv in data.attributes.items()))
else:
with open(filename + '.metadata', 'wb') as f:
pickle.dump(data.attributes, f, pickle.HIGHEST_PROTOCOL)
@classmethod
def set_table_metadata(cls, filename, table):
# pylint: disable=bare-except
if isinstance(filename, str) and path.exists(filename + '.metadata'):
try:
with open(filename + '.metadata', 'rb') as f:
table.attributes = pickle.load(f)
# Unpickling throws different exceptions, not just UnpickleError
except:
with open(filename + '.metadata') as f:
table.attributes = OrderedDict(
(k.strip(), v.strip())
for k, v in (line.split(":", 1)
for line in f.readlines()))
@classmethod
def locate(cls, filename, search_dirs=('.',)):
"""Locate a file with given filename that can be opened by one
of the available readers.
Parameters
----------
filename : str
search_dirs : Iterable[str]
Returns
-------
str
Absolute path to the file
"""
if path.exists(filename):
return filename
for directory in search_dirs:
absolute_filename = path.join(directory, filename)
if path.exists(absolute_filename):
break
for ext in cls.readers:
if filename.endswith(ext):
break
if path.exists(absolute_filename + ext):
absolute_filename += ext
break
if path.exists(absolute_filename):
break
else:
absolute_filename = ""
if not path.exists(absolute_filename):
raise IOError('File "{}" was not found.'.format(filename))
return absolute_filename
@staticmethod
def open(filename, *args, **kwargs):
"""
Format handlers can use this method instead of the builtin ``open()``
to transparently (de)compress files if requested (according to
`filename` extension). Set ``SUPPORT_COMPRESSED=True`` if you use this.
"""
return open_compressed(filename, *args, **kwargs)
@staticmethod
def parse_headers(data):
"""Return (header rows, rest of data) as discerned from `data`"""
def is_number(item):
try: float(item)
except ValueError: return False
return True
# Second row items are type identifiers
def header_test2(items):
return all(map(_RE_TYPES.match, items))
# Third row items are flags and column attributes (attr=value)
def header_test3(items):
return all(map(_RE_FLAGS.match, items))
data = iter(data)
header_rows = []
# Try to parse a three-line header
lines = []
try:
lines.append(list(next(data)))
lines.append(list(next(data)))
lines.append(list(next(data)))
except StopIteration:
lines, data = [], chain(lines, data)
if lines:
l1, l2, l3 = lines
# Three-line header if line 2 & 3 match (1st line can be anything)
if header_test2(l2) and header_test3(l3):
header_rows = [l1, l2, l3]
else:
lines, data = [], chain((l1, l2, l3), data)
# Try to parse a single-line header
if not header_rows:
try: lines.append(list(next(data)))
except StopIteration: pass
if lines:
# Header if none of the values in line 1 parses as a number
if not all(is_number(i) for i in lines[0]):
header_rows = [lines[0]]
else:
data = chain(lines, data)
return header_rows, data
@classmethod
def data_table(self, data, headers=None):
"""
Return Orange.data.Table given rows of `headers` (iterable of iterable)
and rows of `data` (iterable of iterable; if ``numpy.ndarray``, might
as well **have it sorted column-major**, e.g. ``order='F'``).
Basically, the idea of subclasses is to produce those two iterables,
however they might.
If `headers` is not provided, the header rows are extracted from `data`,
assuming they precede it.
"""
if not headers:
headers, data = self.parse_headers(data)
# Consider various header types (single-row, two-row, three-row, none)
if 3 == len(headers):
names, types, flags = map(list, headers)
else:
if 1 == len(headers):
HEADER1_FLAG_SEP = '#'
# First row format either:
# 1) delimited column names
# 2) -||- with type and flags prepended, separated by #,
# e.g. d#sex,c#age,cC#IQ
_flags, names = zip(*[i.split(HEADER1_FLAG_SEP, 1) if HEADER1_FLAG_SEP in i else ('', i)
for i in headers[0]])
names = list(names)
elif 2 == len(headers):
names, _flags = map(list, headers)
else:
# Use heuristics for everything
names, _flags = [], []
types = [''.join(filter(str.isupper, flag)).lower() for flag in _flags]
flags = [Flags.join(filter(str.islower, flag)) for flag in _flags]
# Determine maximum row length
rowlen = max(map(len, (names, types, flags)))
def _equal_length(lst):
lst.extend(['']*(rowlen - len(lst)))
return lst
# Ensure all data is of equal width in a column-contiguous array
data = np.array([_equal_length(list(row)) for row in data if any(row)],
copy=False, dtype=object, order='F')
# Data may actually be longer than headers were
try:
rowlen = data.shape[1]
except IndexError:
pass
else:
for lst in (names, types, flags):
_equal_length(lst)
NAMEGEN = namegen('Feature ', 1)
Xcols, attrs = [], []
Mcols, metas = [], []
Ycols, clses = [], []
Wcols = []
# Rename variables if necessary
# Reusing across files still works if both files have same duplicates
name_counts = Counter(names)
del name_counts[""]
if len(name_counts) != len(names) and name_counts:
uses = {name: 0 for name, count in name_counts.items() if count > 1}
for i, name in enumerate(names):
if name in uses:
uses[name] += 1
names[i] = "{}_{}".format(name, uses[name])
# Iterate through the columns
for col in range(rowlen):
flag = Flags(Flags.split(flags[col]))
if flag.i:
continue
type_flag = types and types[col].strip()
try:
orig_values = [np.nan if i in MISSING_VALUES else i
for i in (i.strip() for i in data[:, col])]
except IndexError:
# No data instances leads here
orig_values = []
# In this case, coltype could be anything. It's set as-is
# only to satisfy test_table.TableTestCase.test_append
coltype = DiscreteVariable
coltype_kwargs = {}
valuemap = []
values = orig_values
if type_flag in StringVariable.TYPE_HEADERS:
coltype = StringVariable
elif type_flag in ContinuousVariable.TYPE_HEADERS:
coltype = ContinuousVariable
try:
values = [float(i) for i in orig_values]
except ValueError:
for row, num in enumerate(orig_values):
try:
float(num)
except ValueError:
break
raise ValueError('Non-continuous value in (1-based) '
'line {}, column {}'.format(row + len(headers) + 1,
col + 1))
elif type_flag in TimeVariable.TYPE_HEADERS:
coltype = TimeVariable
elif (type_flag in DiscreteVariable.TYPE_HEADERS or
_RE_DISCRETE_LIST.match(type_flag)):
coltype = DiscreteVariable
if _RE_DISCRETE_LIST.match(type_flag):
valuemap = Flags.split(type_flag)
coltype_kwargs.update(ordered=True)
else:
valuemap = sorted(set(orig_values) - {np.nan})
else:
# No known type specified, use heuristics
valuemap, values, coltype = guess_data_type(orig_values)
if flag.m or coltype is StringVariable:
append_to = (Mcols, metas)
elif flag.w:
append_to = (Wcols, None)
elif flag.c:
append_to = (Ycols, clses)
else:
append_to = (Xcols, attrs)
cols, domain_vars = append_to
cols.append(col)
existing_var, new_var_name, column = None, None, None
if domain_vars is not None:
existing_var = names and names[col]
if not existing_var:
new_var_name = next(NAMEGEN)
values, var = sanitize_variable(
valuemap, values, orig_values, coltype, coltype_kwargs,
domain_vars, existing_var, new_var_name, data)
if domain_vars is not None:
var.attributes.update(flag.attributes)
domain_vars.append(var)
            # Write back the changed data. This is needed to pass the
            # correct, converted values into Table.from_numpy below
try:
data[:, col] = values
except IndexError:
pass
domain = Domain(attrs, clses, metas)
if not data.size:
return Table.from_domain(domain, 0)
table = Table.from_numpy(domain,
data[:, Xcols].astype(float, order='C'),
data[:, Ycols].astype(float, order='C'),
data[:, Mcols].astype(object, order='C'),
data[:, Wcols].astype(float, order='C'))
return table
@staticmethod
def header_names(data):
return ['weights'] * data.has_weights() + \
[v.name for v in chain(data.domain.attributes,
data.domain.class_vars,
data.domain.metas)]
@staticmethod
def header_types(data):
def _vartype(var):
if var.is_continuous or var.is_string:
return var.TYPE_HEADERS[0]
elif var.is_discrete:
return Flags.join(var.values) if var.ordered else var.TYPE_HEADERS[0]
raise NotImplementedError
return ['continuous'] * data.has_weights() + \
[_vartype(v) for v in chain(data.domain.attributes,
data.domain.class_vars,
data.domain.metas)]
@staticmethod
def header_flags(data):
return list(chain(['weight'] * data.has_weights(),
(Flags.join([flag], *('{}={}'.format(*a)
for a in sorted(var.attributes.items())))
for flag, var in chain(zip(repeat(''), data.domain.attributes),
zip(repeat('class'), data.domain.class_vars),
zip(repeat('meta'), data.domain.metas)))))
@classmethod
def write_headers(cls, write, data):
"""`write` is a callback that accepts an iterable"""
write(cls.header_names(data))
write(cls.header_types(data))
write(cls.header_flags(data))
@classmethod
def write_data(cls, write, data):
"""`write` is a callback that accepts an iterable"""
vars = list(chain((ContinuousVariable('_w'),) if data.has_weights() else (),
data.domain.attributes,
data.domain.class_vars,
data.domain.metas))
for row in zip(data.W if data.W.ndim > 1 else data.W[:, np.newaxis],
data.X,
data.Y if data.Y.ndim > 1 else data.Y[:, np.newaxis],
data.metas):
write(['' if isinstance(val, Number) and isnan(val) else
var.values[int(val)] if var.is_discrete else
var.repr_val(val) if isinstance(var, TimeVariable) else
val
for var, val in zip(vars, flatten(row))])
class CSVReader(FileFormat):
"""Reader for comma separated files"""
EXTENSIONS = ('.csv',)
DESCRIPTION = 'Comma-separated values'
DELIMITERS = ',;:\t$ '
SUPPORT_COMPRESSED = True
SUPPORT_SPARSE_DATA = False
PRIORITY = 20
def read(self):
for encoding in (lambda: ('us-ascii', None), # fast
lambda: (detect_encoding(self.filename), None), # precise
lambda: (locale.getpreferredencoding(False), None),
lambda: (sys.getdefaultencoding(), None), # desperate
lambda: ('utf-8', None), # ...
lambda: ('utf-8', 'ignore')): # fallback
encoding, errors = encoding()
# Clear the error flag for all except the last check, because
# the error of second-to-last check is stored and shown as warning in owfile
if errors != 'ignore':
error = ''
with self.open(self.filename, mode='rt', newline='',
encoding=encoding, errors=errors) as file:
# Sniff the CSV dialect (delimiter, quotes, ...)
try:
dialect = csv.Sniffer().sniff(file.read(1024), self.DELIMITERS)
except UnicodeDecodeError as e:
error = e
continue
except csv.Error:
dialect = csv.excel()
dialect.delimiter = self.DELIMITERS[0]
file.seek(0)
dialect.skipinitialspace = True
try:
reader = csv.reader(file, dialect=dialect)
data = self.data_table(reader)
# TODO: Name can be set unconditionally when/if
# self.filename will always be a string with the file name.
# Currently, some tests pass StringIO instead of
# the file name to a reader.
if isinstance(self.filename, str):
data.name = path.splitext(
path.split(self.filename)[-1])[0]
if error and isinstance(error, UnicodeDecodeError):
pos, endpos = error.args[2], error.args[3]
warning = ('Skipped invalid byte(s) in position '
'{}{}').format(pos,
('-' + str(endpos)) if (endpos - pos) > 1 else '')
warnings.warn(warning)
self.set_table_metadata(self.filename, data)
return data
except Exception as e:
error = e
continue
raise ValueError('Cannot parse dataset {}: {}'.format(self.filename, error)) from error
@classmethod
def write_file(cls, filename, data):
with cls.open(filename, mode='wt', newline='', encoding='utf-8') as file:
writer = csv.writer(file, delimiter=cls.DELIMITERS[0])
cls.write_headers(writer.writerow, data)
cls.write_data(writer.writerow, data)
cls.write_table_metadata(filename, data)
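# Hedged round-trip sketch using the reader registry (assumes the bundled
# 'iris' dataset is available and that writing 'iris_copy.csv' on disk is OK):
def _csv_roundtrip_example():
    data = Table('iris')
    CSVReader.write('iris_copy.csv', data)
    reader = FileFormat.get_reader('iris_copy.csv')   # dispatches to CSVReader
    return reader.read()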
class TabReader(CSVReader):
"""Reader for tab separated files"""
EXTENSIONS = ('.tab', '.tsv')
DESCRIPTION = 'Tab-separated values'
DELIMITERS = '\t'
PRIORITY = 10
class PickleReader(FileFormat):
"""Reader for pickled Table objects"""
EXTENSIONS = ('.pickle', '.pkl')
DESCRIPTION = 'Pickled Python object file'
SUPPORT_SPARSE_DATA = True
def read(self):
with open(self.filename, 'rb') as f:
return pickle.load(f)
@staticmethod
def write_file(filename, data):
with open(filename, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
class BasketReader(FileFormat):
"""Reader for basket (sparse) files"""
EXTENSIONS = ('.basket', '.bsk')
DESCRIPTION = 'Basket file'
SUPPORT_SPARSE_DATA = True
def read(self):
def constr_vars(inds):
if inds:
return [ContinuousVariable(x.decode("utf-8")) for _, x in
sorted((ind, name) for name, ind in inds.items())]
X, Y, metas, attr_indices, class_indices, meta_indices = \
_io.sparse_read_float(self.filename.encode(sys.getdefaultencoding()))
attrs = constr_vars(attr_indices)
classes = constr_vars(class_indices)
meta_attrs = constr_vars(meta_indices)
domain = Domain(attrs, classes, meta_attrs)
table = Table.from_numpy(
domain, attrs and X, classes and Y, metas and meta_attrs)
table.name = path.splitext(path.split(self.filename)[-1])[0]
return table
class ExcelReader(FileFormat):
"""Reader for excel files"""
EXTENSIONS = ('.xls', '.xlsx')
    DESCRIPTION = 'Microsoft Excel spreadsheet'
SUPPORT_SPARSE_DATA = False
def __init__(self, filename):
super().__init__(filename)
from xlrd import open_workbook
self.workbook = open_workbook(self.filename)
@property
@lru_cache(1)
def sheets(self):
return self.workbook.sheet_names()
def read(self):
import xlrd
wb = xlrd.open_workbook(self.filename, on_demand=True)
if self.sheet:
ss = wb.sheet_by_name(self.sheet)
else:
ss = wb.sheet_by_index(0)
try:
first_row = next(i for i in range(ss.nrows) if any(ss.row_values(i)))
first_col = next(i for i in range(ss.ncols) if ss.cell_value(first_row, i))
row_len = ss.row_len(first_row)
cells = filter(any,
[[str(ss.cell_value(row, col)) if col < ss.row_len(row) else ''
for col in range(first_col, row_len)]
for row in range(first_row, ss.nrows)])
table = self.data_table(cells)
table.name = path.splitext(path.split(self.filename)[-1])[0]
if self.sheet:
table.name = '-'.join((table.name, self.sheet))
except Exception:
raise IOError("Couldn't load spreadsheet from " + self.filename)
return table
class DotReader(FileFormat):
"""Writer for dot (graph) files"""
EXTENSIONS = ('.dot', '.gv')
DESCRIPTION = 'Dot graph description'
SUPPORT_COMPRESSED = True
SUPPORT_SPARSE_DATA = False
@classmethod
def write_graph(cls, filename, graph):
from sklearn import tree
tree.export_graphviz(graph, out_file=cls.open(filename, 'wt'))
@classmethod
def write(cls, filename, tree):
if type(tree) == dict:
tree = tree['tree']
cls.write_graph(filename, tree)
class UrlReader(FileFormat):
@staticmethod
def urlopen(url):
req = Request(
url,
# Avoid 403 error with servers that dislike scrapers
headers={'User-Agent': 'Mozilla/5.0 (X11; Linux) Gecko/20100101 Firefox/'})
return urlopen(req, timeout=10)
def read(self):
self.filename = self._trim(self._resolve_redirects(self.filename))
with contextlib.closing(self.urlopen(self.filename)) as response:
name = self._suggest_filename(response.headers['content-disposition'])
with NamedTemporaryFile(suffix=name, delete=False) as f:
f.write(response.read())
# delete=False is a workaround for https://bugs.python.org/issue14243
reader = self.get_reader(f.name)
data = reader.read()
unlink(f.name)
# Override name set in from_file() to avoid holding the temp prefix
data.name = path.splitext(name)[0]
data.origin = self.filename
return data
def _resolve_redirects(self, url):
# Resolve (potential) redirects to a final URL
with contextlib.closing(self.urlopen(url)) as response:
return response.url
@classmethod
def _trim(cls, url):
URL_TRIMMERS = (
cls._trim_googlesheet,
cls._trim_dropbox,
)
for trim in URL_TRIMMERS:
try:
url = trim(url)
except ValueError:
continue
else:
break
return url
@staticmethod
def _trim_googlesheet(url):
match = re.match(r'(?:https?://)?(?:www\.)?'
r'docs\.google\.com/spreadsheets/d/'
r'(?P<workbook_id>[-\w_]+)'
r'(?:/.*?gid=(?P<sheet_id>\d+).*|.*)?',
url, re.IGNORECASE)
try:
workbook, sheet = match.group('workbook_id'), match.group('sheet_id')
if not workbook:
raise ValueError
except (AttributeError, ValueError):
raise ValueError
url = 'https://docs.google.com/spreadsheets/d/{}/export?format=tsv'.format(workbook)
if sheet:
url += '&gid=' + sheet
return url
@staticmethod
def _trim_dropbox(url):
parts = urlsplit(url)
if not parts.netloc.endswith('dropbox.com'):
raise ValueError
return urlunsplit(parts._replace(query='dl=1'))
def _suggest_filename(self, content_disposition):
default_name = re.sub(r'[\\:/]', '_', urlparse(self.filename).path)
# See https://tools.ietf.org/html/rfc6266#section-4.1
matches = re.findall(r"filename\*?=(?:\"|.{0,10}?'[^']*')([^\"]+)",
content_disposition or '')
return urlunquote(matches[-1]) if matches else default_name
| bsd-2-clause |
untom/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
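# Tiny illustrative check of row_norms against the naive dense formulation
# (the matrix values are arbitrary):
def _row_norms_example():
    X = np.array([[3.0, 4.0], [0.0, 0.0], [1.0, 1.0]])
    assert np.allclose(row_norms(X, squared=True), [25.0, 0.0, 2.0])
    assert np.allclose(row_norms(X), np.sqrt((X * X).sum(axis=1)))
    return row_norms(X)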
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)), but more robust.
    It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
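# Quick illustrative sketch: the same call handles sparse and dense operands
# (shapes and values are arbitrary):
def _safe_sparse_dot_example():
    from scipy.sparse import csr_matrix
    rng = np.random.RandomState(0)
    A = csr_matrix(rng.rand(5, 3))
    B = rng.rand(3, 2)
    return safe_sparse_dot(A, B, dense_output=True)   # 2-D result of shape (5, 2)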
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A using by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
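# Hedged usage sketch of randomized_svd on a random matrix (the sizes and
# n_components below are arbitrary illustration values):
def _randomized_svd_example():
    rng = np.random.RandomState(42)
    M = rng.rand(100, 20)
    U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=42)
    # U has shape (100, 5), s has shape (5,), V has shape (5, 20)
    return U.shape, s.shape, V.shape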
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
        Singular values smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
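# Small determinism sketch for svd_flip: the sign convention changes, the
# reconstruction does not (matrix values are arbitrary):
def _svd_flip_example():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    u, s, v = linalg.svd(X, full_matrices=False)
    u, v = svd_flip(u, v)
    assert np.allclose(np.dot(u * s, v), X)   # product is unchanged by the flip
    return u, v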
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
    analysis and recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
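# Consistency sketch for the incremental update: merging two batches should
# reproduce the full-sample mean and variance (data values are arbitrary):
def _batch_mean_variance_example():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 3)
    mean, var, count = _batch_mean_variance_update(
        X[30:], X[:30].mean(axis=0), X[:30].var(axis=0), 30)
    assert count == 50
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    return mean, var, count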
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
juanshishido/okcupid | utils/nonnegative_matrix_factorization.py | 1 | 2118 | import numpy as np
from sklearn.decomposition import NMF
def _print_words(model, feature_names, n_top_words):
"""For printing the `n_top_words` for each grouping
Parameters
----------
model : sklearn.decomposition.nmf.NMF
The NMF object
feature_names : list
The output from calling `TfidfVectorizer` on the users/features data
n_top_words : int
The top n words to print for a particular grouping
Returns
-------
None
"""
for topic_idx, topic in enumerate(model.components_):
print("Group %d:" % topic_idx)
print(" | ".join([feature_names[i]
for i in topic.argsort()[ : -n_top_words-1 : -1]]))
print()
print()
def nmf_inspect(tfidfmatrix, feature_names, k_vals=[3, 5, 7, 9], n_words=10):
"""For looping over various values of `k` and printing the
top `n_words`
Parameters
----------
tfidfmatrix : scipy.sparse.csr.csr_matrix
The output from calling `TfidfVectorizer` on the users/features data
feature_names : list
The output from calling the `.get_feature_names()` on
the TfidfVectorizer object
k_vals : list
A list of values for `k`, the number of groupings
n_words : int
The top n words to print for each grouping
Returns
-------
None
"""
for k in k_vals:
nmf = NMF(n_components=k, random_state=42).fit(tfidfmatrix)
print(k, end='\n')
_print_words(nmf, feature_names, n_words)
def nmf_labels(tfidfmatrix, k):
"""For getting the labels (group assignment) associated with
each sample (user, in this case)
Parameters
----------
tfidfmatrix : scipy.sparse.csr.csr_matrix
The output from calling `TfidfVectorizer` on the users/features data
k : int
The number of groupings to create
Returns
-------
labels : np.ndarray
An array of group assignments of length tfidfmatrix.shape[0] (users)
"""
H = NMF(n_components=k, random_state=42).fit_transform(tfidfmatrix)
labels = np.argmax(H, axis=1)
return labels
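# Minimal usage sketch (the toy corpus below is a stand-in for the real
# user-essay text handled elsewhere in this project):
def _nmf_labels_example():
    from sklearn.feature_extraction.text import TfidfVectorizer
    docs = ['cats and dogs', 'dogs and wolves', 'stocks and bonds', 'bonds and rates']
    tfidf = TfidfVectorizer().fit_transform(docs)
    return nmf_labels(tfidf, k=2)   # one group assignment per document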
| mit |
bhtucker/chatnet | chatnet/pipes.py | 1 | 5136 | from . import logger
from chatnet import prep
import pandas as pd
from collections import Counter
from sklearn.externals import joblib
import os
class Pipeline(object):
"""
Transformer helper functions and state checkpoints
to go from text data/labels to model-ready numeric data
"""
def __init__(self, vocab_size=15000,
data_col=None, id_col=None, label_col=None, skip_top=10,
positive_class=None, df=None, message_key=None, **kwargs
):
# message processing
self.data_col = data_col or 'tokens'
self.id_col = id_col or 'id'
self.label_col = label_col or 'labels'
self.message_key = message_key or 'msgs'
self.positive_class = positive_class
if positive_class is None:
self.label_mode = 'multiclass'
self.n_classes = []
else:
self.label_mode = 'binary'
# vocab processing
self.tp = prep.TextPrepper()
self.vocab_size = vocab_size
self.skip_top = skip_top
self.to_matrices_kwargs = kwargs
if df is not None:
self.setup(df)
def _tokenize(self, df, message_key=''):
"""
Iterate over each row's messages (as specified by message_key),
tokenizing by ' ' and cleaning with self.tp.cleaner
"""
def mapper(message_col):
sequence = []
for message in message_col:
sequence += map(self.tp.cleaner, message.split())
return sequence
df[self.data_col] = df[message_key].map(mapper)
def _set_token_data(self, input_df):
df = input_df.copy()
if self.data_col not in df.columns:
self._tokenize(df, message_key=self.message_key)
self.data = pd.DataFrame(df[[self.data_col, self.id_col, self.label_col]])
logger.info("Counting words...")
self.set_word_counts()
def _set_vocabulary(self):
# This is extended by subclasses with special concerns about word_index (eg word embeddings)
self.set_word_index(skip_top=self.skip_top)
def _set_learning_data(self, **to_matrices_kwargs):
to_matrices_kwargs.setdefault('seed', 212)
to_matrices_kwargs.setdefault('test_split', .18)
to_matrices_kwargs.setdefault('chunk_size', 100)
to_matrices_kwargs.setdefault('data_col', self.data_col)
to_matrices_kwargs.setdefault('id_col', self.id_col)
to_matrices_kwargs.setdefault('label_col', self.label_col)
to_matrices_kwargs.setdefault('positive_class', self.positive_class)
logger.info("Making numeric sequences...")
self.learning_data = (X_train, y_train, train_ids), (X_test, y_test, test_ids) = \
self.tp.to_matrices(self.data, self.word_index, **to_matrices_kwargs)
def setup(self, df):
self._set_token_data(df)
self._set_vocabulary()
self._set_learning_data(**self.to_matrices_kwargs)
def set_word_counts(self):
"""
Map :tp.cleaner over token lists in :data
and return a counter of cleaned :word_counts
"""
word_counts = Counter()
def increment(word):
word_counts[word] += 1
self.data[self.data_col].map(lambda r: list(map(increment, r)))
self.word_counts = word_counts
def set_word_index(self, skip_top=None, nonembeddable=None):
"""
Accepts a dictionary of word counts
        Selects the top :vocab_size words, after skipping the :skip_top most common.
        Optionally provide a set of words you don't have word vectors for and want to omit entirely
Always includes special words (returned by self.cleaner) prepended with $
Returns dict like {word: ranking by count}
"""
skip_top = 10 if skip_top is None else skip_top
vocab = []
for (ix, (w, _)) in enumerate(self.word_counts.most_common(self.vocab_size)):
if w.startswith('$'):
if ix < skip_top:
skip_top += 1
vocab.append(w)
elif (not nonembeddable or w not in nonembeddable) and ix > skip_top:
vocab.append(w)
self.word_index = {v: ix for ix, v in enumerate(vocab)}
def persist(self, name, path):
for attr in self.persisted_attrs:
joblib.dump(getattr(self, attr), os.path.join(path, '_'.join([attr, name])))
@classmethod
def restore(cls, name, path):
pipe = cls()
for attr in cls.persisted_attrs:
setattr(pipe, attr, joblib.load(os.path.join(path, '_'.join([attr, name]))))
return pipe
def get_message_generator(message_key, kind='wide'):
if kind == 'wide':
# iterate over columns in message_key yielding from row
def message_generator(row):
for key in message_key:
yield row[key]
elif kind == 'dense':
# iterate over array of messages in row[message_key]
def message_generator(row):
for cell in row[message_key]:
yield cell
return message_generator
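# Illustrative sketch of the two layouts (the column names are hypothetical):
def _message_generator_example():
    wide_gen = get_message_generator(['msg_1', 'msg_2'], kind='wide')
    dense_gen = get_message_generator('msgs', kind='dense')
    wide_row = {'msg_1': 'hi', 'msg_2': 'how are you'}
    dense_row = {'msgs': ['hi', 'how are you']}
    # both yield the same sequence of messages for their respective row shapes
    return list(wide_gen(wide_row)), list(dense_gen(dense_row))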
| mit |
jzadeh/Aktaion | python/parserDev/brothon/analysis/dataframe_cache.py | 1 | 2617 | """ DataFrame Cache Class: The class provides a caching mechanism for a Pandas DataFrame """
from __future__ import print_function
import time
from collections import deque
import pandas as pd
class DataFrameCache(object):
"""DataFrameCache: The class provides a caching mechanism for a Pandas DataFrame.
Args:
        max_cache_size (int): Maximum size of dataframe cache (default=100000)
max_cache_time (int): Time to keep rows in cache (default=60 seconds)
"""
def __init__(self, max_cache_size=100000, max_cache_time=60):
"""Initialize the DataFrameCache class"""
self.max_size = max_cache_size
self.max_time = max_cache_time
self.row_deque = deque(maxlen=self.max_size)
self.time_deque = deque(maxlen=self.max_size)
self._dataframe = pd.DataFrame()
def add_row(self, row):
"""Add a row to the DataFrameCache class"""
self.add_rows([row])
def add_rows(self, list_of_rows):
"""Add a list of rows to the DataFrameCache class"""
for row in list_of_rows:
self.row_deque.append(row)
self.time_deque.append(time.time())
# Update the data structure
self.update()
def dataframe(self):
"""Return a DataFrame with the current window of data
Note: Only call this when you want the dataframe to be reconstructed"""
self.update()
return pd.DataFrame(list(self.row_deque))
def update(self):
"""Update the deque, removing rows based on time"""
expire_time = time.time() - self.max_time
while self.row_deque and self.time_deque[0] < expire_time:
self.row_deque.popleft() # FIFO
self.time_deque.popleft()
# Simple test of the functionality
def test():
"""Test for DataFrameCache Class"""
import copy
df_cache = DataFrameCache(max_cache_size=10, max_cache_time=1) # Make it small and short for testing
# Make some fake data
base_row = {'id': 0, 'foo': 'bar', 'port': 80, 'protocol': 17}
# Create an array of test rows
test_data = []
for i in range(20):
row = copy.deepcopy(base_row)
row['id'] = i
test_data.append(row)
# Add rows
df_cache.add_rows(test_data)
# Make sure the cache size is working properly
my_df = df_cache.dataframe()
assert len(my_df) == 10
assert my_df.iloc[0]['id'] == 10 # This means the deque is proper FIFO
# Now test time expiration
time.sleep(1)
my_df = df_cache.dataframe()
assert len(my_df) == 0
if __name__ == "__main__":
test()
| apache-2.0 |
aetilley/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """These tests for LFW require medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
massmutual/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
cassandra-sh/corrset | getFullAreaHealPixels.py | 1 | 5066 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: HSC Weak Lensing collaboration
"""
import os
import sys
import math
import numpy as np
import pyfits
import matplotlib.pyplot as plt
import healpy as hp
def removeDuplicatedVisits(d):
# Remove visits with the same pntgid. Those visits were retaken because
# transparency was bad. They should be counted as a single visit rather than
# multiple visits
print("# of visits before removing duplicates in pntgid:", str(len(d)))
sel = list()
for i in range(len(d)):
eq = (d["pntgid"] == d["pntgid"][i])
if np.sum(eq) > 1:
indices = np.where(eq)[0]
if indices[0] == i:
sel.append(i)
else:
sel.append(i)
d = d[sel]
print("# of visits after removing duplicates in pntgid:"+str(len(d)))
return d
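# Editor's note: a vectorized alternative to the O(n^2) loop above, given as a
# sketch only. It assumes "pntgid" is a 1-D column of the FITS record array and
# keeps the first row seen for each pntgid, like removeDuplicatedVisits does.
def removeDuplicatedVisitsVectorized(d):
    # np.unique returns the index of the first occurrence of each pntgid;
    # sorting those indices preserves the original row order.
    _, first_idx = np.unique(d["pntgid"], return_index=True)
    return d[np.sort(first_idx)]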
def getFullAreaHealPixels(d):
# Count up the number of visits and define full-color full-depth region in
# healpix. This part is originally from Yasuda-san.
bands = ['g', 'r', 'i', 'z', 'y']
Nexp = {'g': 4, 'r': 4, 'i': 6, 'z': 6, 'y': 6}
NSIDE = 1024
m = dict()
for b in bands:
m[b] = np.zeros(hp.nside2npix(NSIDE))
for dd in d:
b = dd["filter"][0:1]
phi = math.radians(dd["ra"])
theta = math.radians(90.-dd["dec"])
vec = hp.ang2vec(theta, phi)
healpix_bins = hp.query_disc(NSIDE, vec, math.radians(0.75))
for index in healpix_bins:
m[b][index] += 1
fill = dict()
full = np.ones(hp.nside2npix(NSIDE))
for b in bands:
fill[b] = m[b] >= Nexp[b]
print(str(NSIDE), str(b), str(129600/math.pi * sum(fill[b]) / len(fill[b])))
full = np.logical_and(full, fill[b])
print(str(NSIDE), str(5), str(129600/math.pi * sum(full) / len(full)))
return full
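# Editor's note: usage sketch, not part of the original pipeline. Given the
# boolean map returned above, membership of arbitrary (ra, dec) positions in
# the full-depth full-color footprint can be checked like this; the NSIDE is
# recovered from the map itself.
def inFullArea(full_map, ra_deg, dec_deg):
    nside = hp.get_nside(full_map)
    theta = np.radians(90. - np.asarray(dec_deg))
    phi = np.radians(np.asarray(ra_deg))
    return full_map[hp.ang2pix(nside, theta, phi)]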
def getContiguousPixels(m):
# Get contiguous regions to remove isolated pixels around each field.
    nside = hp.get_nside(m)
m_out = np.zeros(m.shape, dtype = np.bool)
for field in ["AEGIS", "HECTOMAP", "GAMA09H", "WIDE12H", "GAMA15H", "VVDS", "XMM"]:
print(field)
if field == "AEGIS":
alpha0 = 215.
delta0 = 52.5
if field == "HECTOMAP":
alpha0 = 240.
delta0 = 43.
if field == "GAMA09H":
alpha0 = 137.
delta0 = 1.
if field == "WIDE12H":
alpha0 = 180.
delta0 = 0.
if field == "GAMA15H":
alpha0 = 220.
delta0 = 0.
if field == "VVDS":
alpha0 = 336.
delta0 = 1.
if field == "XMM":
alpha0 = 34.
delta0 = -4.
phi = math.radians(alpha0)
theta = math.radians(90-delta0)
to_be_visited = list()
visited = list()
ipix = hp.ang2pix(nside, theta, phi)
if m[ipix] != True:
print("central pixel is not true")
            sys.exit(1)
m_out[ipix] = True
flag = True
ipix_in = ipix
i = 0
# grow from center
while(flag):
ipixs = hp.get_all_neighbours(nside, ipix_in)[[0,2,4,6]]
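            # Editor's note: per the healpy documentation, get_all_neighbours
            # returns the 8 neighbours in the order SW, W, NW, N, NE, E, SE, S,
            # so indices [0, 2, 4, 6] select the SW/NW/NE/SE neighbours (the
            # pixels that generally share an edge); the region thus grows as a
            # 4-connected flood fill from the field centre.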
visited.append(ipix_in)
m_out[ipixs] = m[ipixs]
ipixs_true = ipixs[m[ipixs]]
ipixs_true_not_visited = list()
for item in ipixs_true:
if not (item in visited):
if not (item in to_be_visited):
ipixs_true_not_visited.append(item)
to_be_visited += ipixs_true_not_visited
ipix_in = np.min(to_be_visited)
to_be_visited.remove(ipix_in)
if len(to_be_visited) == 0:
flag = False
i += 1
if i % 100 == 0:
print(str(i), str(len(visited)), str(len(set(visited))),
str(len(to_be_visited)), str(len(set(to_be_visited))))
return m_out
def plot_mask(ra, dec, nside=2**10):
    pix = hp.pixelfunc.ang2pix(nside, ra, dec, lonlat=True)
hp.visufunc.mollview(map=np.bincount(pix,minlength=hp.nside2npix(nside)),
cbar=False, notext=True, max=1, min=0, title="",
cmap="binary")
hp.graticule()
plt.show()
if __name__ == "__main__":
location = '/scratch/csh4/tools/147279.fits'
d = pyfits.getdata(location)#sys.argv[1])
# I do not know why, but a visit in s16a_wide.mosaicframe__deepcoadd is missing in s16a_wide.frame.
d = d[~d["visit_isnull"]]
d = removeDuplicatedVisits(d)
m = getFullAreaHealPixels(d)
m = getContiguousPixels(m)
# os.remove("/scratch/csh4/tools/S16A_fdfc_hp_map.fits")
# hp.write_map("/scratch/csh4/tools/S16A_fdfc_hp_map.fits", m,
# dtype=np.bool, nest = False)
hp.visufunc.mollview(map=m,
cbar=False, notext=True, max=1, min=0, title="",
cmap="binary")
hp.graticule()
plt.show()
| mit |
BoltzmannBrain/nupic | src/nupic/research/monitor_mixin/plot.py | 19 | 5187 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import logging
import os
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
# Suppress; we log it at debug level to avoid polluting the logs of apps
# and services that don't care about plotting
logging.debug("Cannot import matplotlib. Plot class will not work.",
exc_info=True)
class Plot(object):
def __init__(self, monitor, title, show=True):
"""
@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this plot
@param title (string) Plot title
"""
self._monitor = monitor
self._title = title
self._fig = self._initFigure()
self._show = show
if self._show:
plt.ion()
plt.show()
def _initFigure(self):
fig = plt.figure()
fig.suptitle(self._prettyPrintTitle())
return fig
def _prettyPrintTitle(self):
if self._monitor.mmName is not None:
return "[{0}] {1}".format(self._monitor.mmName, self._title)
return self._title
def addGraph(self, data, position=111, xlabel=None, ylabel=None):
""" Adds a graph to the plot's figure.
@param data See matplotlib.Axes.plot documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.plot(data)
plt.draw()
def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
bins=None):
""" Adds a histogram to the plot's figure.
@param data See matplotlib.Axes.hist documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.hist(data, bins=bins, color="green", alpha=0.8)
plt.draw()
def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
aspect="auto", interpolation="nearest", name=None):
""" Adds an image to the plot's figure.
@param data a 2D array. See matplotlib.Axes.imshow documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@param cmap color map used in the rendering
@param aspect how aspect ratio is handled during resize
@param interpolation interpolation method
"""
if cmap is None:
# The default colormodel is an ugly blue-red model.
cmap = cm.Greys
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
if self._show:
plt.draw()
if name is not None:
if not os.path.exists("log"):
os.mkdir("log")
plt.savefig("log/{name}.png".format(name=name), bbox_inches="tight",
figsize=(8, 6), dpi=400)
def _addBase(self, position, xlabel=None, ylabel=None):
""" Adds a subplot to the plot's figure at specified position.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@returns (matplotlib.Axes) Axes instance
"""
ax = self._fig.add_subplot(position)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
| agpl-3.0 |
iamshang1/Projects | Basic_ML/Exploratory_Data_Analysis/exploratory_data_analysis.py | 1 | 5673 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
Forbes2000 = pd.read_csv("Forbes2000.csv", sep=',', usecols=range(0,9))
Forbes2000 = Forbes2000[1:]
#companies by market value and profits
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 8))
marketv = axes[0].hist(np.array(Forbes2000['Market Value'].astype(float)), 50, range=[0,100], facecolor='green', alpha=0.5)
axes[0].set_title('2014 Forbes 2000 Company Market Values')
axes[0].set_ylabel('# of Companies')
axes[0].set_xlabel('Market Value in Billion $')
axes[0].set_xticks(np.arange(0,101,10))
profits = axes[1].hist(np.array(Forbes2000['Profits'].astype(float)), 50, range=[-5,15], facecolor='green', alpha=0.5)
axes[1].set_title('2014 Forbes 2000 Company Profits')
axes[1].set_ylabel('# of Companies')
axes[1].set_xlabel('Profit in Billion $')
axes[1].set_xticks(np.arange(-4,15,2))
plt.savefig('f1.png')
plt.show()
#separate into sectors
Financials = Forbes2000[Forbes2000.Sector=="Financials"]
Energy = Forbes2000[Forbes2000.Sector=="Energy"]
Industrials = Forbes2000[Forbes2000.Sector=="Industrials"]
IT = Forbes2000[Forbes2000.Sector=="Information Technology"]
ConsumerD = Forbes2000[Forbes2000.Sector=="Consumer Discretionary"]
ConsumerS = Forbes2000[Forbes2000.Sector=="Consumer Staples"]
Health = Forbes2000[Forbes2000.Sector=="Health Care"]
Utilities = Forbes2000[Forbes2000.Sector=="Utilities"]
Telecom = Forbes2000[Forbes2000.Sector=="Telecommunication Services"]
Materials = Forbes2000[Forbes2000.Sector=="Materials"]
#companies by sector
xnames = ['Financials', 'Energy', 'Industrials', 'Information Tech.', 'Cons. Discretionary', 'Cons. Staples', 'Health Care', 'Utilities', 'Telecommunications', 'Materials']
colors = ['lightgreen', 'cornflowerblue', 'lightgrey', 'steelblue', 'plum', 'sandybrown', 'tomato', 'silver', 'violet', 'skyblue']
plt.figure(figsize=(12, 8))
plt.pie([sector.count()[0] for sector in [Financials, Energy, Industrials, IT, ConsumerD, ConsumerS, Health, Utilities, Telecom, Materials]], labels=xnames, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
plt.axis('equal')
plt.title("Forbes 2000 Companies by Sector", y=1.08)
plt.savefig('f2.png')
plt.show()
#market value and profits by sector
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 10))
marketv = axes[0].boxplot([np.array(sector['Market Value'].astype(float)) for sector in [Financials, Energy, Industrials, IT, ConsumerD, ConsumerS, Health, Utilities, Telecom, Materials]], showmeans=True)
axes[0].set_ylabel('Market Value in Billion $')
axes[0].set_ylim(0, 200)
axes[0].set_title('2014 Forbes 2000 Market Value by Sector')
axes[0].set_yticks(np.arange(0,200,10))
axes[0].set_xticklabels(xnames, rotation=45, fontsize=8, ha="right")
axes[0].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)
profits = axes[1].boxplot([np.array(sector.Profits.astype(float)) for sector in [Financials, Energy, Industrials, IT, ConsumerD, ConsumerS, Health, Utilities, Telecom, Materials]], showmeans=True)
axes[1].set_ylabel('Profits in Billion $')
axes[1].set_ylim(-4, 20)
axes[1].set_title('2014 Forbes 2000 Profits by Sector')
axes[1].set_yticks(np.arange(-4,20,2))
axes[1].set_xticklabels(xnames, rotation=45, fontsize=8, ha="right")
axes[1].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)
plt.savefig('f3.png')
plt.show()
#separate by continent
NA = Forbes2000[Forbes2000.Continent=="North America"]
SA = Forbes2000[Forbes2000.Continent=="South America"]
Europe = Forbes2000[Forbes2000.Continent=="Europe"]
Asia = Forbes2000[Forbes2000.Continent=="Asia"]
Australia = Forbes2000[Forbes2000.Continent=="Australia"]
Africa = Forbes2000[Forbes2000.Continent=="Africa"]
#companies by continent
xnames = ['North America', 'South America', 'Europe', 'Australia', 'Asia', 'Africa']
colors = ['cornflowerblue', 'tomato', 'violet', 'gold', 'palegreen', 'sandybrown']
plt.figure(figsize=(12, 8))
plt.pie([continent.count()[0] for continent in [NA, SA, Europe, Australia, Asia, Africa]], labels=xnames, colors=colors, autopct='%1.1f%%', shadow=True, startangle=30)
plt.axis('equal')
plt.title("Forbes 2000 Companies by Continent", y=1.08)
plt.savefig('f4.png')
plt.show()
#market value and profits by continent
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 10))
marketv = axes[0].boxplot([np.array(continent['Market Value'].astype(float)) for continent in [NA, SA, Europe, Australia, Asia, Africa]], showmeans=True)
axes[0].set_ylabel('Market Value in Billion $')
axes[0].set_ylim(0, 300)
axes[0].set_title('2014 Forbes 2000 Market Value by Continent')
axes[0].set_yticks(np.arange(0,300,20))
axes[0].set_xticklabels(xnames, rotation=45, fontsize=8, ha="right")
axes[0].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)
profits = axes[1].boxplot([np.array(continent.Profits.astype(float)) for continent in [NA, SA, Europe, Australia, Asia, Africa]], showmeans=True)
axes[1].set_ylabel('Profits in Billion $')
axes[1].set_ylim(-5, 30)
axes[1].set_title('2014 Forbes 2000 Profits by Continent')
axes[1].set_yticks(np.arange(-5,30,5))
axes[1].set_xticklabels(xnames, rotation=45, fontsize=8, ha="right")
axes[1].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)
plt.savefig('f5.png')
plt.show()
# relationship between profits and market value
plt.figure(figsize=(12, 8))
marketv = np.array(Forbes2000['Market Value'].astype(float))
profits = np.array(Forbes2000['Profits'].astype(float))
plt.scatter(marketv, profits, alpha=0.5)
plt.title("Relationship Between Market Value and Profits", y=1.08)
plt.xlabel('Market Value in Billion $')
plt.ylabel('Profit in Billion $')
plt.xlim(-20, 500)
plt.savefig('f6.png')
plt.show() | mit |
RebeccaWPerry/vispy | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
The levels at which the isocurve is constructed from "*data*".
color_lev : Color, colormap name, tuple, list or array
        The color to use when drawing the line. If a list is given, it
        must be of shape (Nlev,); if an array is given, it must be of
        shape (Nlev, ...) and provide one color per level (rgba, colorname).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
Notes
-----
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
# computes ColorArrays for given levels
# try _color_lev as colormap, except as everything else
try:
f_color_levs = get_colormap(self._color_lev)
except:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
if (len(colors) != len(self._levels)):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
| bsd-3-clause |
tropp/acq4 | acq4/pyqtgraph/widgets/MatplotlibWidget.py | 30 | 1442 | from ..Qt import QtGui, QtCore, USE_PYSIDE, USE_PYQT5
import matplotlib
if not USE_PYQT5:
if USE_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
else:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
Use getFigure() and redraw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
QtGui.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
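# Editor's note: minimal standalone demo, added as a sketch and not part of the
# original module. It assumes a Qt binding is available through pyqtgraph's Qt
# shim, which exposes QApplication on QtGui for both Qt4 and Qt5 builds.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    mw = MatplotlibWidget()
    subplot = mw.getFigure().add_subplot(111)
    subplot.plot([0, 1, 2, 3], [0, 1, 4, 9])
    mw.draw()
    mw.show()
    sys.exit(app.exec_())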
| mit |
shafferm/SCNIC | SCNIC/module.py | 1 | 3600 | """Make modules of observations based on cooccurrence networks and collapse table"""
from collections import defaultdict
import numpy as np
import pandas as pd
from biom import load_table
from biom.util import biom_open
from os import path
import os
import networkx as nx
from SCNIC import general
from SCNIC import module_analysis as ma
def module_maker(input_loc, output_loc, min_p=None, min_r=None, method='naive', k_size=3, gamma=.4, table_loc=None,
prefix='module', verbose=False):
logger = general.Logger(path.join(output_loc, "SCNIC_module_log.txt"))
logger["SCNIC analysis type"] = "module"
# read in correlations file
correls = pd.read_csv(input_loc, index_col=(0, 1), sep='\t')
correls.index = pd.MultiIndex.from_tuples([(str(id1), str(id2)) for id1, id2 in correls.index])
logger["input correls"] = input_loc
if verbose:
print("correls.txt read")
# sanity check args
if min_r is not None and min_p is not None:
raise ValueError("arguments min_p and min_r may not be used concurrently")
if min_r is None and min_p is None:
raise ValueError("argument min_p or min_r must be used")
# make new output directory and change to it
if output_loc is not None:
if not path.isdir(output_loc):
os.makedirs(output_loc)
logger["output directory"] = path.abspath(output_loc)
# make modules
if method == 'naive':
modules = ma.make_modules_naive(correls, min_r, min_p, prefix=prefix)
elif method == 'k_cliques':
modules = ma.make_modules_k_cliques(correls, min_r, min_p, k_size, prefix=prefix)
elif method == 'louvain':
modules = ma.make_modules_louvain(correls, min_r, min_p, gamma, prefix=prefix)
else:
raise ValueError('%s is not a valid module picking method' % method)
logger["number of modules created"] = len(modules)
if verbose:
print("Modules Formed")
print("number of modules: %s" % len(modules))
print("number of observations in modules: %s" % np.sum([len(i) for i in modules]))
print("")
ma.write_modules_to_file(modules, path_str=path.join(output_loc, 'modules.txt'))
# collapse modules
if table_loc is not None:
table = load_table(table_loc)
logger["input uncollapsed table"] = table_loc
if verbose:
print("otu table read")
coll_table = ma.collapse_modules(table, modules)
# ma.write_modules_to_dir(table, modules)
logger["number of observations in output table"] = coll_table.shape[0]
if verbose:
print("Table Collapsed")
print("collapsed Table Observations: " + str(coll_table.shape[0]))
print("")
with biom_open(path.join(output_loc, 'collapsed.biom'), 'w') as f:
coll_table.to_hdf5(f, 'make_modules.py')
metadata = general.get_metadata_from_table(table)
else:
metadata = defaultdict(dict)
# make network
metadata = ma.add_modules_to_metadata(modules, metadata)
correls_filter = general.filter_correls(correls, conet=True, min_p=min_p, min_r=min_r)
net = general.correls_to_net(correls_filter, metadata=metadata)
nx.write_gml(net, path.join(output_loc, 'correlation_network.gml'))
if verbose:
print("Network Generated")
print("number of nodes: %s" % str(net.number_of_nodes()))
print("number of edges: %s" % str(net.number_of_edges()))
logger["number of nodes"] = net.number_of_nodes()
logger["number of edges"] = net.number_of_edges()
logger.output_log()
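# Editor's note: module_maker is normally driven by SCNIC's command-line
# wrapper; the call below is only a hypothetical illustration of a direct call
# with made-up file names.
#
#     module_maker('correls.txt', 'scnic_modules_out', min_r=0.35,
#                  method='naive', table_loc='otu_table.biom', verbose=True)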
| bsd-3-clause |
xhochy/arrow | python/examples/flight/client.py | 6 | 6791 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An example Flight CLI client."""
import argparse
import sys
import pyarrow
import pyarrow.flight
import pyarrow.csv as csv
def list_flights(args, client, connection_args={}):
print('Flights\n=======')
for flight in client.list_flights():
descriptor = flight.descriptor
if descriptor.descriptor_type == pyarrow.flight.DescriptorType.PATH:
print("Path:", descriptor.path)
elif descriptor.descriptor_type == pyarrow.flight.DescriptorType.CMD:
print("Command:", descriptor.command)
else:
print("Unknown descriptor type")
print("Total records:", end=" ")
if flight.total_records >= 0:
print(flight.total_records)
else:
print("Unknown")
print("Total bytes:", end=" ")
if flight.total_bytes >= 0:
print(flight.total_bytes)
else:
print("Unknown")
print("Number of endpoints:", len(flight.endpoints))
print("Schema:")
print(flight.schema)
print('---')
print('\nActions\n=======')
for action in client.list_actions():
print("Type:", action.type)
print("Description:", action.description)
print('---')
def do_action(args, client, connection_args={}):
try:
buf = pyarrow.allocate_buffer(0)
action = pyarrow.flight.Action(args.action_type, buf)
print('Running action', args.action_type)
for result in client.do_action(action):
print("Got result", result.body.to_pybytes())
except pyarrow.lib.ArrowIOError as e:
print("Error calling action:", e)
def push_data(args, client, connection_args={}):
print('File Name:', args.file)
my_table = csv.read_csv(args.file)
print('Table rows=', str(len(my_table)))
df = my_table.to_pandas()
print(df.head())
writer, _ = client.do_put(
pyarrow.flight.FlightDescriptor.for_path(args.file), my_table.schema)
writer.write_table(my_table)
writer.close()
def get_flight(args, client, connection_args={}):
if args.path:
descriptor = pyarrow.flight.FlightDescriptor.for_path(*args.path)
else:
descriptor = pyarrow.flight.FlightDescriptor.for_command(args.command)
info = client.get_flight_info(descriptor)
for endpoint in info.endpoints:
print('Ticket:', endpoint.ticket)
for location in endpoint.locations:
print(location)
get_client = pyarrow.flight.FlightClient(location,
**connection_args)
reader = get_client.do_get(endpoint.ticket)
df = reader.read_pandas()
print(df)
def _add_common_arguments(parser):
parser.add_argument('--tls', action='store_true',
help='Enable transport-level security')
parser.add_argument('--tls-roots', default=None,
help='Path to trusted TLS certificate(s)')
parser.add_argument("--mtls", nargs=2, default=None,
metavar=('CERTFILE', 'KEYFILE'),
help="Enable transport-level security")
parser.add_argument('host', type=str,
help="Address or hostname to connect to")
def main():
parser = argparse.ArgumentParser()
subcommands = parser.add_subparsers()
cmd_list = subcommands.add_parser('list')
cmd_list.set_defaults(action='list')
_add_common_arguments(cmd_list)
cmd_list.add_argument('-l', '--list', action='store_true',
help="Print more details.")
cmd_do = subcommands.add_parser('do')
cmd_do.set_defaults(action='do')
_add_common_arguments(cmd_do)
cmd_do.add_argument('action_type', type=str,
help="The action type to run.")
cmd_put = subcommands.add_parser('put')
cmd_put.set_defaults(action='put')
_add_common_arguments(cmd_put)
cmd_put.add_argument('file', type=str,
help="CSV file to upload.")
cmd_get = subcommands.add_parser('get')
cmd_get.set_defaults(action='get')
_add_common_arguments(cmd_get)
cmd_get_descriptor = cmd_get.add_mutually_exclusive_group(required=True)
cmd_get_descriptor.add_argument('-p', '--path', type=str, action='append',
help="The path for the descriptor.")
cmd_get_descriptor.add_argument('-c', '--command', type=str,
help="The command for the descriptor.")
args = parser.parse_args()
if not hasattr(args, 'action'):
parser.print_help()
sys.exit(1)
commands = {
'list': list_flights,
'do': do_action,
'get': get_flight,
'put': push_data,
}
host, port = args.host.split(':')
port = int(port)
scheme = "grpc+tcp"
connection_args = {}
if args.tls:
scheme = "grpc+tls"
if args.tls_roots:
with open(args.tls_roots, "rb") as root_certs:
connection_args["tls_root_certs"] = root_certs.read()
if args.mtls:
with open(args.mtls[0], "rb") as cert_file:
tls_cert_chain = cert_file.read()
with open(args.mtls[1], "rb") as key_file:
tls_private_key = key_file.read()
connection_args["cert_chain"] = tls_cert_chain
connection_args["private_key"] = tls_private_key
client = pyarrow.flight.FlightClient(f"{scheme}://{host}:{port}",
**connection_args)
while True:
try:
action = pyarrow.flight.Action("healthcheck", b"")
options = pyarrow.flight.FlightCallOptions(timeout=1)
list(client.do_action(action, options=options))
break
except pyarrow.ArrowIOError as e:
if "Deadline" in str(e):
print("Server is not ready, waiting...")
commands[args.action](args, client, connection_args)
if __name__ == '__main__':
main()
| apache-2.0 |
campbe13/openhatch | vendor/packages/mechanize/test/test_performance.py | 22 | 2573 | import os
import time
import sys
import unittest
import mechanize
from mechanize._testcase import TestCase, TempDirMaker
from mechanize._rfc3986 import urljoin
KB = 1024
MB = 1024**2
GB = 1024**3
def time_it(operation):
t = time.time()
operation()
return time.time() - t
def write_data(filename, nr_bytes):
block_size = 4096
block = "01234567" * (block_size // 8)
fh = open(filename, "w")
try:
for i in range(nr_bytes // block_size):
fh.write(block)
finally:
fh.close()
def time_retrieve_local_file(temp_maker, size, retrieve_fn):
temp_dir = temp_maker.make_temp_dir()
filename = os.path.join(temp_dir, "data")
write_data(filename, size)
def operation():
retrieve_fn(urljoin("file://", filename),
os.path.join(temp_dir, "retrieved"))
return time_it(operation)
class PerformanceTests(TestCase):
def test_retrieve_local_file(self):
def retrieve(url, filename):
br = mechanize.Browser()
br.retrieve(url, filename)
size = 100 * MB
# size = 1 * KB
desired_rate = 2*MB # per second
desired_time = size / float(desired_rate)
fudge_factor = 2.
self.assert_less_than(
time_retrieve_local_file(self, size, retrieve),
desired_time * fudge_factor)
def show_plot(rows):
import matplotlib.pyplot
figure = matplotlib.pyplot.figure()
axes = figure.add_subplot(111)
axes.plot([row[0] for row in rows], [row[1] for row in rows])
matplotlib.pyplot.show()
def power_2_range(start, stop):
n = start
while n <= stop:
yield n
n *= 2
def performance_plot():
def retrieve(url, filename):
br = mechanize.Browser()
br.retrieve(url, filename)
# import urllib2
# def retrieve(url, filename):
# urllib2.urlopen(url).read()
# from mechanize import _useragent
# ua = _useragent.UserAgent()
# ua.set_seekable_responses(True)
# ua.set_handle_equiv(False)
# def retrieve(url, filename):
# ua.retrieve(url, filename)
rows = []
for size in power_2_range(256 * KB, 256 * MB):
temp_maker = TempDirMaker()
try:
elapsed = time_retrieve_local_file(temp_maker, size, retrieve)
finally:
temp_maker.tear_down()
rows.append((size//float(MB), elapsed))
show_plot(rows)
if __name__ == "__main__":
args = sys.argv[1:]
if "--plot" in args:
performance_plot()
else:
unittest.main()
| agpl-3.0 |
nicproulx/mne-python | examples/visualization/plot_topo_customized.py | 12 | 1905 | """
========================================
Plot custom topographies for MEG sensors
========================================
This example exposes the `iter_topography` function that makes it
very easy to generate custom sensor topography plots.
Here we will plot the power spectrum of each channel on a topographic
layout.
"""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.viz import iter_topography
from mne import io
from mne.time_frequency import psd_welch
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20)
picks = mne.pick_types(raw.info, meg=True, exclude=[])
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 2, 20 # look at frequencies between 2 and 20Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds, freqs = psd_welch(raw, picks=picks, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds = 20 * np.log10(psds) # scale to dB
def my_callback(ax, ch_idx):
"""
This block of code is executed once you click on one of the channel axes
in the plot. To work with the viz internals, this function should only take
two parameters, the axis and the channel or data index.
"""
ax.plot(freqs, psds[ch_idx], color='red')
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Power (dB)')
for ax, idx in iter_topography(raw.info,
fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white',
on_pick=my_callback):
ax.plot(psds[idx], color='red')
plt.gcf().suptitle('Power spectral densities')
plt.show()
| bsd-3-clause |
rudhir-upretee/Sumo17_With_Netsim | tools/visualization/mpl_tripinfos_twoAgainst.py | 1 | 5678 | #!/usr/bin/env python
"""
@file mpl_tripinfos_twoAgainst.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: mpl_tripinfos_twoAgainst.py 13811 2013-05-01 20:31:43Z behrisch $
This script reads two tripinfos files and plots one of the values
stored therein as an x-/y- plot.
matplotlib has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val/16)] + hex[int(val - int(val/16)*16)]
def toColor(val):
"""Converts the given value (0-1) into a color definition as parseable by matplotlib"""
g = 255. * val
return "#" + toHex(g) + toHex(g) + toHex(g)
def updateMinMax(min, max, value):
if min==None or min>value:
min = value
if max==None or max<value:
max = value
return (min, max)
class VehroutesReader(handler.ContentHandler):
"""Reads the vehroutes file"""
def __init__(self, value):
self._id = ''
self._veh2value = {}
self._veh2time = {}
self._value = value
def startElement(self, name, attrs):
if name == 'tripinfo':
if attrs.has_key('id'):
id = attrs['id']
else:
id = attrs['vehicle_id']
self._veh2value[id] = float(attrs[self._value])
if attrs.has_key('depart'):
self._veh2time[id] = float(attrs["depart"])
else:
self._veh2time[id] = float(attrs["wished"])
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--tripinfos1", dest="tripinfos1",
help="First tripinfos (mandatory)", metavar="FILE")
optParser.add_option("-2", "--tripinfos2", dest="tripinfos2",
help="Second tripinfos (mandatory)", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
help="Name of the image to generate", metavar="FILE")
optParser.add_option("--size", dest="size",type="string", default="",
help="defines the output size")
# processing
optParser.add_option("--value", dest="value",
type="string", default="duration", help="which value shall be used")
optParser.add_option("-s", "--show", action="store_true", dest="show",
default=False, help="shows plot after generating it")
optParser.add_option("-C", "--time-coloring", action="store_true", dest="time_coloring",
default=False, help="colors the points by the time")
# axes/legend
optParser.add_option("--xticks", dest="xticks",type="string", default="",
help="defines ticks on x-axis")
optParser.add_option("--yticks", dest="yticks",type="string", default="",
help="defines ticks on y-axis")
optParser.add_option("--xlim", dest="xlim",type="string", default="",
help="defines x-axis range")
optParser.add_option("--ylim", dest="ylim",type="string", default="",
help="defines y-axis range")
# parse options
(options, args) = optParser.parse_args()
# check set options
if not options.show and not options.output:
print "Neither show (--show) not write (--output <FILE>)? Exiting..."
exit()
parser = make_parser()
# read dump1
if options.verbose:
print "Reading tripinfos1..."
r1 = VehroutesReader(options.value)
parser.setContentHandler(r1)
parser.parse(options.tripinfos1)
# read dump2
if options.verbose:
print "Reading tripinfos2..."
r2 = VehroutesReader(options.value)
parser.setContentHandler(r2)
parser.parse(options.tripinfos2)
# plot
if options.verbose:
print "Processing data..."
# set figure size
if not options.show:
rcParams['backend'] = 'Agg'
if options.size:
f = figure(figsize=(options.size.split(",")))
else:
f = figure()
xs = []
ys = []
# compute values and color(s)
c = 'k'
min = None
max = None
#if options.time_coloring:
c = []
for veh in r1._veh2value:
if veh in r2._veh2value:
if options.time_coloring:
cc = 1. - ((float(r1._veh2time[veh]) / 86400.) * .8 + .2)
c.append(toColor(cc))
else:
c.append('k')
xs.append(r1._veh2value[veh])
ys.append(r2._veh2value[veh])
(min, max) = updateMinMax(min, max, r1._veh2value[veh])
(min, max) = updateMinMax(min, max, r2._veh2value[veh])
# plot
print "data range: " + str(min) + " - " + str(max)
if options.verbose:
print "Plotting..."
if options.time_coloring:
scatter(xs, ys, color=c, s=1)
else:
plot(xs, ys, ',k')
# set axes
if options.xticks!="":
(xb, xe, xd, xs) = options.xticks.split(",")
xticks(arange(xb, xe, xd), size = xs)
if options.yticks!="":
(yb, ye, yd, ys) = options.yticks.split(",")
yticks(arange(yb, ye, yd), size = ys)
if options.xlim!="":
(xb, xe) = options.xlim.split(",")
xlim(int(xb), int(xe))
else:
xlim(min, max)
if options.ylim!="":
(yb, ye) = options.ylim.split(",")
ylim(int(yb), int(ye))
else:
ylim(min, max)
# show/save
if options.show:
show()
if options.output:
savefig(options.output);
| gpl-3.0 |
jongyoul/zeppelin | python/src/main/resources/python/zeppelin_python.py | 19 | 6945 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
import ast
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PythonCompletion:
def __init__(self, interpreter, userNameSpace):
self.interpreter = interpreter
self.userNameSpace = userNameSpace
def getObjectCompletion(self, text_value):
completions = [completion for completion in list(self.userNameSpace.keys()) if completion.startswith(text_value)]
builtinCompletions = [completion for completion in dir(__builtins__) if completion.startswith(text_value)]
return completions + builtinCompletions
def getMethodCompletion(self, objName, methodName):
execResult = locals()
try:
exec("{} = dir({})".format("objectDefList", objName), _zcUserQueryNameSpace, execResult)
except:
self.interpreter.logPythonOutput("Fail to run dir on " + objName)
self.interpreter.logPythonOutput(traceback.format_exc())
return None
else:
objectDefList = execResult['objectDefList']
return [completion for completion in execResult['objectDefList'] if completion.startswith(methodName)]
def getCompletion(self, text_value):
if text_value == None:
return None
dotPos = text_value.find(".")
if dotPos == -1:
objName = text_value
completionList = self.getObjectCompletion(objName)
else:
objName = text_value[:dotPos]
methodName = text_value[dotPos + 1:]
completionList = self.getMethodCompletion(objName, methodName)
if completionList is None or len(completionList) <= 0:
self.interpreter.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreter.setStatementsFinished(result, False)
host = sys.argv[1]
port = int(sys.argv[2])
if "PY4J_GATEWAY_SECRET" in os.environ:
from py4j.java_gateway import GatewayParameters
gateway_secret = os.environ["PY4J_GATEWAY_SECRET"]
gateway = JavaGateway(gateway_parameters=GatewayParameters(
address=host, port=port, auth_token=gateway_secret, auto_convert=True))
else:
gateway = JavaGateway(GatewayClient(address=host, port=port), auto_convert=True)
intp = gateway.entry_point
_zcUserQueryNameSpace = {}
completion = PythonCompletion(intp, _zcUserQueryNameSpace)
_zcUserQueryNameSpace["__zeppelin_completion__"] = completion
_zcUserQueryNameSpace["gateway"] = gateway
from zeppelin_context import PyZeppelinContext
if intp.getZeppelinContext():
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
intp.onPythonScriptInitialized(os.getpid())
# redirect stdout/stderr to java side so that PythonInterpreter can capture the python execution result
output = Logger()
sys.stdout = output
sys.stderr = output
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
isForCompletion = req.isForCompletion()
# Get post-execute hooks
try:
if req.isCallHooks():
global_hook = intp.getHook('post_exec_dev')
else:
global_hook = None
except:
global_hook = None
try:
if req.isCallHooks():
user_hook = __zeppelin__.getHook('post_exec')
else:
user_hook = None
except:
user_hook = None
nhooks = 0
if not isForCompletion:
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]] if len(code.body) > nhooks else [])
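            # Editor's note: the last nhooks parsed statements are treated as the
            # post-execution hooks; the statement just before them (the last user
            # statement) is compiled in 'single' mode so that its value is echoed
            # as in a REPL, while everything earlier runs in plain 'exec' mode.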
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
if not isForCompletion:
# only call it when it is not for code completion. code completion will call it in
# PythonCompletion.getCompletion
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
if not isForCompletion:
# extract which line incur error from error message. e.g.
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# ZeroDivisionError: integer division or modulo by zero
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
magne-max/zipline-ja | tests/utils/test_cache.py | 6 | 1936 | from unittest import TestCase
from pandas import Timestamp, Timedelta
from zipline.utils.cache import CachedObject, Expired, ExpiringCache
class CachedObjectTestCase(TestCase):
def test_cached_object(self):
expiry = Timestamp('2014')
before = expiry - Timedelta('1 minute')
after = expiry + Timedelta('1 minute')
obj = CachedObject(1, expiry)
self.assertEqual(obj.unwrap(before), 1)
self.assertEqual(obj.unwrap(expiry), 1) # Unwrap on expiry is allowed.
with self.assertRaises(Expired) as e:
obj.unwrap(after)
self.assertEqual(e.exception.args, (expiry,))
class ExpiringCacheTestCase(TestCase):
def test_expiring_cache(self):
expiry_1 = Timestamp('2014')
before_1 = expiry_1 - Timedelta('1 minute')
after_1 = expiry_1 + Timedelta('1 minute')
expiry_2 = Timestamp('2015')
after_2 = expiry_1 + Timedelta('1 minute')
expiry_3 = Timestamp('2016')
cache = ExpiringCache()
cache.set('foo', 1, expiry_1)
cache.set('bar', 2, expiry_2)
self.assertEqual(cache.get('foo', before_1), 1)
# Unwrap on expiry is allowed.
self.assertEqual(cache.get('foo', expiry_1), 1)
with self.assertRaises(KeyError) as e:
self.assertEqual(cache.get('foo', after_1))
self.assertEqual(e.exception.args, ('foo',))
# Should raise same KeyError after deletion.
with self.assertRaises(KeyError) as e:
self.assertEqual(cache.get('foo', before_1))
self.assertEqual(e.exception.args, ('foo',))
# Second value should still exist.
self.assertEqual(cache.get('bar', after_2), 2)
# Should raise similar KeyError on non-existent key.
with self.assertRaises(KeyError) as e:
self.assertEqual(cache.get('baz', expiry_3))
self.assertEqual(e.exception.args, ('baz',))
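# Editor's note, a sketch of the pattern under test: ExpiringCache wraps each
# value in a CachedObject and evicts it lazily on an expired access, i.e.
#
#     cache = ExpiringCache()
#     cache.set(key, value, expiry_timestamp)
#     value = cache.get(key, as_of_timestamp)   # KeyError once expired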
| apache-2.0 |
vigilv/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it lies in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for the timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
atantet/transferPlasim | transfer/plotSpectrumPlasim.py | 1 | 12156 | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import gaussian_kde
import atmath
# Case definition
S = 1265
restartState = "warm"
lastYear = 9999
firstYear = 101
processing = "_yearly"
indexChoice = ["nhemisic", "eqmst"]
dim = len(indexChoice)
# Grid definition
nx = 25
nSTD = 5
# Lags
nLags = 1
tauDimRng = np.array([1])
nev = 25
nevPlot = 3
#plotAdjoint = False
plotAdjoint = True
resDir = "%s_%d" % (restartState, np.round(S*10))
dstDir = resDir
caseName = "%s_%05d_%05d" % (resDir, firstYear, lastYear)
os.system('mkdir %s/ccf %s/spectrum/eigval/figs %s/spectrum/eigvec/figs 2> /dev/null' \
% (dstDir, dstDir, dstDir))
obsName = caseName
gridPostfix = "N"
N = 1
for d in np.arange(dim):
N *= nx
obsName += ("_%s" % indexChoice[d])
if d > 0:
gridPostfix += ("x%d" % nx)
else:
gridPostfix += ("%d" % nx)
gridPostfix = "%s_%s_%s_%dstd" % (processing, obsName, gridPostfix, nSTD)
# Read grid
gridFile = '%s/grid/grid%s.txt' % (dstDir, gridPostfix)
f = open(gridFile, 'r')
bounds = []
coord = []
for k in np.arange(dim):
bounds.append(np.array(f.readline().split()).astype(float))
coord.append((bounds[k][1:] + bounds[k][:-1]) / 2)
f.close()
X, Y = np.meshgrid(coord[0], coord[1])
# Plot
levels = 20
fs_default = 'x-large'
fs_latex = 'xx-large'
fs_xlabel = fs_default
fs_ylabel = fs_default
fs_xticklabels = fs_default
fs_yticklabels = fs_default
fs_legend_title = fs_default
fs_legend_labels = fs_default
fs_cbar_label = fs_default
msize = 48
scattersize = 12
#scattersize = 36
# figFormat = 'eps'
figFormat = 'png'
dpi = 300
readMap = False
gridXlim = [coord[0].min(), coord[0].max()]
gridYlim = [coord[1].min(), coord[1].max()]
for lag in np.arange(tauDimRng.shape[0]):
tauDim = tauDimRng[lag]
postfix = "%s_tau%02d" % (gridPostfix, tauDim)
    print 'Reading spectrum...'
EigValFile = '%s/spectrum/eigval/eigval_nev%d%s.txt' % (dstDir, nev, postfix)
EigVecFile = '%s/spectrum/eigvec/eigvec_nev%d%s.txt' % (dstDir, nev, postfix)
eigval = np.loadtxt(EigValFile)
eigval = eigval[:, 0] + eigval[:, 1]*1j
eigvec = np.loadtxt(EigVecFile)
eigvec = eigvec[::2] + eigvec[1::2]*1j
isort = np.argsort(np.abs(eigval))[::-1]
eigval = eigval[isort]
eigvec = eigvec[isort]
if plotAdjoint:
EigValAdjointFile = '%s/spectrum/eigval/eigvalAdjoint_nev%d%s.txt' % (dstDir, nev, postfix)
EigVecAdjointFile = '%s/spectrum/eigvec/eigvecAdjoint_nev%d%s.txt' % (dstDir, nev, postfix)
eigvalAdjoint = np.loadtxt(EigValAdjointFile)
eigvalAdjoint = eigvalAdjoint[:, 0] + eigvalAdjoint[:, 1]*1j
eigvecAdjoint = np.loadtxt(EigVecAdjointFile)
# From the transpose we get the conjugate of the adjoint eigenvectors
# so we take back the conjugate
eigvecAdjoint = eigvecAdjoint[::2] - eigvecAdjoint[1::2]*1j
isort = np.argsort(np.abs(eigvalAdjoint))[::-1]
eigvalAdjoint = eigvalAdjoint[isort]
eigvecAdjoint = eigvecAdjoint[isort]
nevSingle = eigval.shape[0]
eigvalGen = np.empty((nevSingle,), dtype=complex)
ev = 0
for count in np.arange(eigval.shape[0]):
eigvalGen[ev] = (np.log(np.abs(eigval[count])) \
+ np.angle(eigval[count]) * 1j) / tauDim
ev += 1
if ev >= nevSingle:
break
if eigval[count].imag != 0:
eigvalGen[ev] = np.conjugate(eigvalGen[ev-1])
ev +=1
if ev >= nevSingle:
break
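    # Note: the generator eigenvalues are obtained from the transfer-operator
    # eigenvalues mu as zeta = (ln|mu| + i*arg(mu)) / tau, so -1 / Re(zeta_2)
    # below is the slowest decaying time scale of the system.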
# Plot spectrum
print 'Plotting spectrum slowest rate ', -1. / eigvalGen[1].real
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(eigvalGen[1:].real, eigvalGen[1:].imag, c='k', s=msize, marker='o')
ax.scatter(eigvalGen[0].real, eigvalGen[0].imag, c='r', s=msize, marker='o')
ax.set_xlabel(r'$\Re(\zeta_i)$', fontsize=fs_latex)
ax.set_ylabel(r'$\Im(\zeta_i)$', fontsize=fs_latex)
plt.setp(ax.get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=fs_yticklabels)
#ax.set_title('%d-time-step spectrum for %s\nSlowest time-scale: %.1f' \
# % (tau, srcPostfix, -1. / rate[0]))
ax.set_xlim(-1.2, 0.02)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
plt.text(xlim[1] - (xlim[1] - xlim[0])*0.6, ylim[1] - (ylim[1] - ylim[0])*0.1,
r'$1 / \Re(\lambda_2) = %.1f$ (years)' \
% (-1. / eigvalGen[1].real,), fontsize=fs_latex)
fig.savefig('%s/spectrum/eigval/figs/eigval_nev%d%s.%s' % (dstDir, nev, postfix, figFormat),
bbox_inches='tight', dpi=dpi)
# Plot eigenvectors of transfer operator
tol = 0.
alpha = 0.0
for k in np.arange(nevPlot):
if np.abs(eigval[k].real - 1) < 1.e-3:
# Plot invariant measure
print 'Plotting stationary density...'
statDen = eigvec[0].real
statDen /= statDen.sum()
fig = plt.figure()
ax = fig.add_subplot(111)
alpha = 0.01
h = ax.contourf(X, Y, statDen.reshape(nx, nx), levels,
cmap=cm.hot_r)
ax.set_xlim(gridXlim)
ax.set_ylim(gridYlim)
plt.colorbar(h)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title("Approximation of the invariant measure", fontsize=fs_default)
            fig.savefig('%s/spectrum/eigvec/figs/eigvecReal_nev%d_ev%03d%s.%s' \
% (dstDir, nev, 1, postfix, figFormat), bbox_inches='tight', dpi=dpi)
else:
print 'Plotting real part of eigenvector %d...' % (k+1,)
fig = plt.figure()
ax = fig.add_subplot(111)
v2Real = eigvec[k].real
# vmax = np.sort(np.abs(v2Real))[(1. - alpha)*N-1]
vmax = np.sort(np.abs(v2Real))[-1]
v2Real[v2Real > vmax] = vmax
v2Real[v2Real < -vmax] = -vmax
h = ax.contourf(X, Y, v2Real.reshape(nx, nx), levels,
cmap=cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.set_xlim(gridXlim)
ax.set_ylim(gridYlim)
plt.colorbar(h)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title("Real part of the eigenvector %d" % (k+1,),
fontsize=fs_default)
            fig.savefig('%s/spectrum/eigvec/figs/eigvecReal_nev%d_ev%03d%s.%s' \
% (dstDir, nev, k+1, postfix, figFormat), bbox_inches='tight', dpi=dpi)
if eigval[k].imag != 0:
print 'Plotting imaginary part of eigenvector %d...' % (k+1,)
fig = plt.figure()
ax = fig.add_subplot(111)
v2Imag = eigvec[k].imag
vmax = np.sort(np.abs(v2Imag))[-1]
v2Imag[v2Imag > vmax] = vmax
v2Imag[v2Imag < -vmax] = -vmax
h = ax.contourf(X, Y, v2Imag.reshape(nx, nx), levels,
cmap=cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.set_xlim(gridXlim)
ax.set_ylim(gridYlim)
plt.colorbar(h)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title("Imaginary part of the eigenvector %d" % (k+1,),
fontsize=fs_default)
                fig.savefig('%s/spectrum/eigvec/figs/eigvecImag_nev%d_ev%03d%s.%s' \
% (dstDir, nev, k, postfix, figFormat), bbox_inches='tight', dpi=dpi)
# Plot eigenvectors of Koopman operator
if plotAdjoint:
eigvecAdjointScale = np.zeros((nevSingle, N), dtype=complex)
for k in np.arange(nevPlot):
eigvecAdjointScale[k] = eigvecAdjoint[k] \
/ np.conjugate(np.vdot(eigvecAdjoint[k], eigvec[k]))
if np.abs(eigval[k].real - 1) < 1.e-3:
# Plot invariant measure
print 'Plotting ergodic vector...'
ergodicVec = np.abs(eigvecAdjoint[0].real)
ergodicVec /= np.abs(ergodicVec).max()
fig = plt.figure()
ax = fig.add_subplot(111)
alpha = 0.01
h = ax.contourf(X, Y, ergodicVec.reshape(nx, nx), levels,
cmap=cm.hot_r)
ax.set_xlim(gridXlim)
ax.set_ylim(gridYlim)
plt.colorbar(h)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title("Approximation of the ergodic vector", fontsize=fs_default)
                fig.savefig('%s/spectrum/eigvec/figs/eigvecAdjointReal_nev%d_ev%03d%s.%s' \
% (dstDir, nev, 1, postfix, figFormat), bbox_inches='tight', dpi=dpi)
else:
print 'Plotting real part of Koopman eigenvector %d...' % (k+1,)
fig = plt.figure()
ax = fig.add_subplot(111)
v2Real = eigvecAdjoint[k].real
vmax = np.sort(np.abs(v2Real))[-1]
v2Real[v2Real > vmax] = vmax
v2Real[v2Real < -vmax] = -vmax
h = ax.contourf(X, Y, v2Real.reshape(nx, nx), levels,
cmap=cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.set_xlim(gridXlim)
ax.set_ylim(gridYlim)
plt.colorbar(h)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title("Real part of the Koopman eigenvector %d" % (k+1,),
fontsize=fs_default)
                fig.savefig('%s/spectrum/eigvec/figs/eigvecAdjointReal_nev%d_ev%03d%s.%s' \
% (dstDir, nev, k+1, postfix, figFormat), bbox_inches='tight', dpi=dpi)
if eigval[k].imag != 0:
print 'Plotting imaginary part of Koopman eigenvector %d...' % (k+1,)
fig = plt.figure()
ax = fig.add_subplot(111)
v2Imag = eigvecAdjoint[k].imag
vmax = np.sort(np.abs(v2Imag))[-1]
v2Imag[v2Imag > vmax] = vmax
v2Imag[v2Imag < -vmax] = -vmax
h = ax.contourf(X, Y, v2Imag.reshape(nx, nx), levels,
cmap=cm.RdBu_r, vmin=-vmax, vmax=vmax)
ax.set_xlim(gridXlim)
ax.set_ylim(gridYlim)
plt.colorbar(h)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title("Imaginary part of the Koopman eigenvector %d" % (k+1,),
fontsize=fs_default)
                    fig.savefig('%s/spectrum/eigvec/figs/eigvecAdjointImag_nev%d_ev%03d%s.%s' \
% (dstDir, nev, k, postfix, figFormat),
bbox_inches='tight', dpi=dpi)
# Get ccf
lagMax = 100
lags = np.arange(0, lagMax+1, 1)
f = X.flatten()
g = X.flatten()
obsIdx0 = 0
obsIdx1 = 0
statesFileName = "%s/obs/obs%s.txt" % (dstDir, gridPostfix)
sim = np.loadtxt(statesFileName)
sim = sim.reshape(sim.shape[0] / dim, dim)
ccf = atmath.ccf(sim[:, obsIdx0], sim[:, obsIdx1], lagMax=lagMax)[lagMax:]
#ccf = atmath.ccovf(sim[:, obsIdx0], sim[:, obsIdx1], lagMax=lagMax)[lagMax:]
ccfRec = np.zeros((lags.shape[0],), dtype=complex)
for ev in np.arange(1, nevSingle):
ccfRec += np.exp(eigvalGen[ev]*lags) \
* (f * statDen * np.conjugate(eigvecAdjoint[ev])).sum() \
* (eigvec[ev] * np.conjugate(g)).sum()
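    # Note: this spectrally reconstructs the correlation function -- each
    # eigenpair contributes exp(zeta_k * lag), weighted by the projections of
    # the observables f and g onto the adjoint and forward eigenvectors.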
#ccfRec /= np.cov(sim[:, obsIdx0], sim[:, obsIdx1])[0, 1]
ccfRec /= ccfRec[0]
print 'Plotting'
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(lags, ccf, linewidth=2)
ax.plot(lags, ccfRec, '--', linewidth=2)
fig.savefig('%s/ccf/ccf_%s_%s_nev%d%s.%s' \
% (dstDir, indexChoice[obsIdx0], indexChoice[obsIdx1], nev, postfix, figFormat),
bbox_inches='tight', dpi=dpi)
| gpl-2.0 |
jskDr/jamespy_py3 | dl/kkeras_util.py | 2 | 2816 | #from keras.models import Sequential
from keras.layers import Dense, Input
from keras.models import Model
from keras.regularizers import l1
import matplotlib.pyplot as plt
def plot_model_history( history):
"""
    Plot the training accuracy and loss curves from a Keras History object.
"""
plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
#plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
#plt.legend(['train', 'test'], loc='upper left')
plt.show()
def plot_history( history):
"""
    Plot training and validation accuracy and loss curves from a Keras History object.
"""
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
class Model_Ordinary( Model):
"""
Adaptive linear model based on Keras Model
"""
def __init__(self, X_shape_1):
in_model = Input(shape=(X_shape_1,))
out_model = Dense(1, activation='linear')(in_model)
super().__init__(input = in_model, output=out_model)
self.compile(optimizer='adadelta', loss='mse', metrics=['accuracy'])
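# Minimal usage sketch (illustrative only, not part of the original module);
# it assumes X is a 2-D NumPy feature array and y a matching target vector.
def _example_fit_ordinary(X, y):
    """Fit the adaptive linear model above and plot its training history."""
    model = Model_Ordinary(X.shape[1])
    history = model.fit(X, y)
    plot_model_history(history)
    return model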
class Model_Ordinary_Hidden( Model):
"""
    Adaptive model with one hidden ReLU layer, based on Keras Model
"""
def __init__(self, X_shape_1, n_h_nodes):
in_model = Input(shape=(X_shape_1,))
hidden_l = Dense(n_h_nodes, activation='relu')(in_model)
out_model = Dense(1, activation='linear')(hidden_l)
super().__init__(input = in_model, output=out_model)
self.compile(optimizer='adadelta', loss='mse', metrics=['accuracy'])
class Model_Lasso( Model):
"""
    L1-regularized (lasso) adaptive linear model based on Keras Model
"""
def __init__(self, X_shape_1, alpha):
in_model = Input(shape=(X_shape_1,))
out_model = Dense(1, activation='linear', W_regularizer=l1(alpha))(in_model)
super().__init__(input = in_model, output=out_model)
self.compile(optimizer='adadelta', loss='mse', metrics=['accuracy'])
class Model_Lasso_Hidden( Model):
"""
    L1-regularized (lasso) adaptive model with one hidden ReLU layer, based on Keras Model
"""
def __init__(self, X_shape_1, n_h_nodes, alpha):
in_model = Input(shape=(X_shape_1,))
hidden_l = Dense(n_h_nodes, activation='relu', W_regularizer=l1(alpha))(in_model)
out_model = Dense(1, activation='linear', W_regularizer=l1(alpha))(hidden_l)
super().__init__(input = in_model, output=out_model)
self.compile(optimizer='adadelta', loss='mse', metrics=['accuracy']) | mit |
yvlasov/ConProbIN | try-ml/try-v01.py | 1 | 2762 | #!/usr/bin/python
# Load libraries
import pandas
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
# Note: sklearn.cross_validation is deprecated; newer sklearn uses model_selection instead
from sklearn import cross_validation
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)
# shape
print(dataset.shape)
# head
print(dataset.head(20))
# descriptions
print(dataset.describe())
# class distribution
print(dataset.groupby('class').size())
#box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
plt.show()
# histograms
dataset.hist()
plt.show()
# scatter plot matrix
scatter_matrix(dataset)
plt.show()
# Split-out validation dataset
array = dataset.values
print(array)
X = array[:,0:4]
Y = array[:,4]
print(X)
print(Y)
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = cross_validation.train_test_split(X, Y, test_size=validation_size, random_state=seed)
# Test options and evaluation metric
num_folds = 10
num_instances = len(X_train)
seed = 7
scoring = 'accuracy'
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = cross_validation.KFold(n=num_instances, n_folds=num_folds, random_state=seed)
cv_results = cross_validation.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# Compare Algorithms
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
# Make predictions on validation dataset
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| mit |
cmshobe/landlab | landlab/plot/graph.py | 3 | 2498 | import matplotlib.pyplot as plt
import numpy as np
def plot_nodes(graph, color="r", with_id=True, markersize=10):
for node in range(len(graph.x_of_node)):
x, y = graph.x_of_node[node], graph.y_of_node[node]
plt.plot(
graph.x_of_node[node],
graph.y_of_node[node],
"o",
color=color,
markersize=markersize,
)
if with_id:
plt.text(x, y, node, color=color, size=16)
def plot_links(
graph, color="b", linestyle="solid", with_id=True, as_arrow=True, linewidth=None
):
if as_arrow:
head_width = 0.1
else:
head_width = 0.0
for link, nodes in enumerate(graph.nodes_at_link):
x, y = graph.x_of_node[nodes[0]], graph.y_of_node[nodes[0]]
dx, dy = graph.x_of_node[nodes[1]] - x, graph.y_of_node[nodes[1]] - y
plt.arrow(
x,
y,
dx,
dy,
head_width=head_width,
linewidth=linewidth,
length_includes_head=True,
color=color,
linestyle=linestyle,
)
if with_id:
plt.text(x + dx * 0.5, y + dy * 0.5, link, size=16, color=color)
def plot_patches(graph, color="g"):
for patch, nodes in enumerate(graph.nodes_at_patch):
x, y = np.mean(graph.x_of_node[nodes]), np.mean(graph.y_of_node[nodes])
plt.text(x, y, patch, color=color, size=16)
def plot_graph(graph, at="node,link,patch", with_id=True):
locs = [loc.strip() for loc in at.split(",")]
for loc in locs:
if loc not in ("node", "link", "patch", "corner", "face", "cell"):
raise ValueError('{at}: "at" element not understood'.format(at=loc))
plt.plot(graph.x_of_node, graph.y_of_node, ".", color="r")
plt.xlim([min(graph.x_of_node) - 0.5, max(graph.x_of_node) + 0.5])
plt.ylim([min(graph.y_of_node) - 0.5, max(graph.y_of_node) + 0.5])
if "node" in locs:
plot_nodes(graph, with_id=with_id, markersize=10)
if "link" in locs:
plot_links(graph, with_id=with_id, linewidth=None, as_arrow=False)
if "patch" in locs:
plot_patches(graph)
if "corner" in locs:
plot_nodes(graph.dual, color="c")
if "face" in locs:
plot_links(graph.dual, linestyle="dotted", color="k")
if "cell" in locs and graph.number_of_cells > 0:
plot_patches(graph.dual, color="m")
plt.xlabel("x")
plt.ylabel("y")
plt.gca().set_aspect(1.0)
plt.show()
| mit |
joshwalawender/RasPiProjects | HumidityMonitor.py | 1 | 11713 | #!/usr/env/python
from __future__ import division, print_function
## Import General Tools
import sys
import os
import argparse
import logging
import time
import numpy as np
import Adafruit_DHT
# import DHT22
# import DS18B20
# import urllib2
# import Carriots
import humiditycalc
# import astropy.io.ascii as ascii
# import astropy.table as table
threshold_humid = 55
threshold_wet = 75
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
def measure(verbose=False):
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
logger = logging.getLogger('MyLogger')
logger.setLevel(logging.DEBUG)
## Set up console output
LogConsoleHandler = logging.StreamHandler()
if verbose:
LogConsoleHandler.setLevel(logging.DEBUG)
else:
LogConsoleHandler.setLevel(logging.INFO)
LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
LogConsoleHandler.setFormatter(LogFormat)
logger.addHandler(LogConsoleHandler)
## Set up file output
LogFileName = os.path.join('/', 'home', 'joshw', 'logs', time.strftime('HumidityLog_%Y%m%d.txt', time.localtime()))
LogFileHandler = logging.FileHandler(LogFileName)
LogFileHandler.setLevel(logging.DEBUG)
LogFileHandler.setFormatter(LogFormat)
logger.addHandler(LogFileHandler)
##-------------------------------------------------------------------------
## Get Temperature and Humidity Values
##-------------------------------------------------------------------------
logger.info('#### Reading Temperature and Humidity Sensors ####')
logger.info('Reading DHT22')
sensor = Adafruit_DHT.AM2302
pin = 4
DHT_humidity, DHT_temperature_C = Adafruit_DHT.read_retry(sensor, pin)
if not DHT_humidity or not DHT_temperature_C:
DHT_humidity, DHT_temperature_C = Adafruit_DHT.read_retry(sensor, pin)
if not DHT_humidity or not DHT_temperature_C:
print('Read failed a second time.')
sys.exit(1)
DHT_temperature_F = 32. + 9./5.*DHT_temperature_C
logger.info(' Temperature = {:.3f} F, Humidity = {:.1f} %'.format(DHT_temperature_F, DHT_humidity))
AH = humiditycalc.relative_to_absolute_humidity(DHT_temperature_C, DHT_humidity)
logger.info(' Absolute Humidity = {:.2f} g/m^3'.format(AH))
##-------------------------------------------------------------------------
## Determine Status Using Humidity
##-------------------------------------------------------------------------
if (DHT_humidity < threshold_humid):
status = 'OK'
elif (DHT_humidity > threshold_humid) and (DHT_humidity < threshold_wet):
status = 'HUMID'
else:
status = 'WET'
logger.info('Status: {}'.format(status))
##-------------------------------------------------------------------------
## Determine Status and Alarm Using History
##-------------------------------------------------------------------------
datestring = time.strftime('%Y%m%d_log.txt', time.localtime())
timestring = time.strftime('%Y/%m/%d %H:%M:%S HST', time.localtime())
datafile = os.path.join('/', 'home', 'joshw', 'logs', datestring)
logger.debug("Reading history data from file: {0}".format(datafile))
dataFO = open(datafile, 'a+')
lines = dataFO.readlines()
if len(lines) == 0:
dataFO = open(datafile, 'a')
dataFO.write('# {},{},{},{},{},{}\n'.format(
'date',\
'time',\
'temperature (F)',\
'humidity (%)',\
'absolute humidity (g/m^3)',\
'status'))
data = []
else:
data = []
for line in lines:
if line[0] != '#':
data.append(line.strip('\n').split(','))
translation = {'OK':0, 'HUMID':1, 'WET':2, 'ALARM':2}
    # Default to a benign history so the checks below cannot fail with a
    # NameError when only a few samples have been logged so far.
    recent_status = 0
    recent_alarm = False
    if len(data) > 6:
        recent_status_vals = [translation[line[5]] for line in data][-6:]
        recent_status = np.mean(recent_status_vals)
    if len(data) > 23:
        recent_status_vals = [translation[line[5]] for line in data][-23:]
        recent_alarm = 2 in recent_status_vals
logger.debug(' Recent Status = {:.2f}, Current Status = {}, Recent alarm: {}'.format(recent_status, status, recent_alarm))
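    # Escalation rule: raise ALARM only when the mean of the last 6 readings
    # is already elevated, the current reading is not OK, and no ALARM was
    # issued within the previous 23 readings (to avoid repeated alerts).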
if (recent_status > 0.5) and not status == 'OK' and not recent_alarm:
status = 'ALARM'
##-------------------------------------------------------------------------
## Record Values to Table
##-------------------------------------------------------------------------
dataFO.write('{},{},{:.1f},{:.1f},{:.2f},{}\n'.format(
timestring[0:10],\
timestring[11:23],\
DHT_temperature_F,\
DHT_humidity,\
AH,\
status))
## Log to Carriots
# logger.info('Sending Data to Carriots')
# logger.debug(' Creating Device object')
# Device = Carriots.Client(device_id="Shed@joshwalawender")
# logger.debug(' Reading api key')
# Device.read_api_key_from_file(file=os.path.join(os.path.expanduser('~joshw'), '.carriots_api'))
# data_dict = {'Temperature1': sensor.temperatures_F[0], \
# 'Temperature2': sensor.temperatures_F[1], \
# 'Temperature3': DHT.temperature_F, \
# 'Humidity': DHT.humidity, \
# 'Absolute Humidity': AH, \
# 'Status': status
# }
# logger.debug(data_dict)
# logger.debug(' Uploading data')
# try:
# Device.upload(data_dict)
# except urllib2.HTTPError as e:
# logger.critical(' Upload failed')
# logger.critical(' {}'.format(e.code))
# logger.critical(' {}'.format(e.reason))
# except:
# logger.critical(' Upload failed')
# logger.critical(' Unexpected error: {}'.format(sys.exc_info()[0]))
# logger.info('Done')
##-------------------------------------------------------------------------
## Make Plot
##-------------------------------------------------------------------------
def plot(verbose=False):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
pyplot.ioff()
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
logger = logging.getLogger('MyLogger')
logger.setLevel(logging.DEBUG)
## Set up console output
LogConsoleHandler = logging.StreamHandler()
if verbose:
LogConsoleHandler.setLevel(logging.DEBUG)
else:
LogConsoleHandler.setLevel(logging.INFO)
LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
LogConsoleHandler.setFormatter(LogFormat)
logger.addHandler(LogConsoleHandler)
## Set up file output
LogFileName = os.path.join('/', 'home', 'joshw', 'logs', 'PlotLog.txt')
LogFileHandler = logging.FileHandler(LogFileName)
LogFileHandler.setLevel(logging.DEBUG)
LogFileHandler.setFormatter(LogFormat)
logger.addHandler(LogFileHandler)
##-------------------------------------------------------------------------
## Read Log File
##-------------------------------------------------------------------------
datestring = time.strftime('%Y%m%d_log.txt', time.localtime())
datafile = os.path.join('/', 'home', 'joshw', 'logs', datestring)
logger.info("Reading Data File: "+datafile)
dataFO = open(datafile, 'a+')
lines = dataFO.readlines()
data = []
for line in lines:
if line[0] != '#':
data.append(line.strip('\n').split(','))
dates = [val[0] for val in data]
time_strings = [val[1] for val in data]
times = [(time.strptime(val[1], '%H:%M:%S HST').tm_hour +\
time.strptime(val[1], '%H:%M:%S HST').tm_min/60.)\
for val in data ]
temperature = [float(val[2]) for val in data]
humidity = [float(val[3]) for val in data]
AH = [float(val[4]) for val in data]
status = [val[5] for val in data]
##-------------------------------------------------------------------------
## Plot
##-------------------------------------------------------------------------
# PlotFileName = time.strftime('%Y%m%d.png', time.localtime())
PlotFileName = 'latest.png'
PlotFile = os.path.join('/', 'home', 'joshw', 'logs', PlotFileName)
logger.info("Writing Output File: "+PlotFile)
dpi=72
Figure = pyplot.figure(figsize=(16,10), dpi=dpi)
HumidityAxes = pyplot.axes([0.10, 0.43, 0.9, 0.40])
title_string = '{:10s} at {:12s}:\n'.format(dates[-1], time_strings[-1])
title_string += 'Temperature = {:.1f} F, '.format(temperature[-1])
title_string += 'Humidity = {:.0f} %'.format(humidity[-1])
pyplot.title(title_string)
pyplot.plot(times, humidity, 'ko', label="Humidity", mew=0, ms=3)
pyplot.plot([0, 24], [threshold_humid, threshold_humid],\
'y-', label='threshold humidity', linewidth=3, alpha=0.8)
pyplot.plot([0, 24], [threshold_wet, threshold_wet],\
'r-', label='threshold humidity', linewidth=3, alpha=0.8)
pyplot.yticks(range(10,100,10))
pyplot.ylim(25,95)
pyplot.ylabel("Humidity (%)")
pyplot.xticks([])
pyplot.xlim(0,24)
pyplot.grid()
AbsHumidityAxes = HumidityAxes.twinx()
AbsHumidityAxes.set_ylabel('Abs. Hum. (g/m^3)', color='b')
pyplot.plot(times, AH, 'bo', label="Abs. Hum.", mew=0, ms=3)
pyplot.yticks(range(00,45,5))
pyplot.ylim(7.5,22.5)
pyplot.xticks(range(0,25,1))
pyplot.xlim(0,24)
pyplot.xlabel('Hours (HST)')
TemperatureAxes = pyplot.axes([0.10, 0.05, 0.9, 0.35])
    pyplot.plot(times, temperature, 'go', label="Temperature", mew=0, ms=3)
pyplot.xticks(range(0,25,1))
pyplot.yticks(range(50,110,5))
pyplot.xlim(0,24)
pyplot.ylim(70,100)
pyplot.xlabel('Hours (HST)')
pyplot.ylabel("Temperature (F)")
pyplot.grid()
pyplot.savefig(PlotFile, dpi=dpi, bbox_inches='tight', pad_inches=0.10)
logger.info("Done")
##-------------------------------------------------------------------------
## Create Daily Symlink if Not Already
##-------------------------------------------------------------------------
LinkFileName = 'latest.png'
LinkFile = os.path.join('/', 'home', 'joshw', 'logs', LinkFileName)
if not os.path.exists(LinkFile):
logger.info('Making {} symlink to {}'.format(LinkFile, PlotFile))
os.symlink(PlotFile, LinkFile)
logger.info("Done")
def main():
##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
## create a parser object for understanding command-line arguments
parser = argparse.ArgumentParser(
description="Program description.")
## add flags
parser.add_argument("-v", "--verbose",
action="store_true", dest="verbose",
default=False, help="Be verbose! (default = False)")
parser.add_argument("-p", "--plot",
action="store_true", dest="plot",
default=False, help="Make plot.")
args = parser.parse_args()
if not args.plot:
measure(verbose=args.verbose)
else:
plot(verbose=args.verbose)
if __name__ == '__main__':
main()
| bsd-2-clause |
feranick/SpectralMachine | Utilities/PlotRruffSpectraRound.py | 1 | 3466 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
*********************************************
*
* PlotRruffSpectraRound
* Plot Rruff spectra
* Files must be in RRuFF
* version: 20171208c
*
* By: Nicola Ferralis <feranick@hotmail.com>
*
***********************************************
'''
print(__doc__)
import numpy as np
import sys, os.path, getopt, glob, csv, re
from datetime import datetime, date
import matplotlib.pyplot as plt
def main():
if len(sys.argv) < 4:
print(' Usage:\n python3 PlotRruffSpectraRound.py <EnIn> <EnFin> <EnStep> <decimals>\n')
print(' Requires python 3.x. Not compatible with python 2.x\n')
return
else:
enInit = sys.argv[1]
enFin = sys.argv[2]
enStep = sys.argv[3]
decimals = int(sys.argv[4])
rootPlotFile = "plot_"
dateTimeStamp = str(datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
summaryPlotFile = rootPlotFile+"summary_"+dateTimeStamp+".csv"
plotFile = rootPlotFile+dateTimeStamp
plt.figure(num=plotFile)
with open(summaryPlotFile, "a") as sum_file:
sum_file.write('Classification started: '+dateTimeStamp+"\n")
index = 0
for ind, file in enumerate(sorted(os.listdir("."))):
#try:
if os.path.splitext(file)[-1] == ".txt":
with open(file, 'r') as f:
En = np.loadtxt(f, unpack = True, usecols=range(0,1), delimiter = ',', skiprows = 10)
with open(file, 'r') as f:
R = np.loadtxt(f, unpack = True, usecols=range(1,2), delimiter = ',', skiprows = 10)
print(file + '\n File OK, converting to ASCII...')
            EnT = np.arange(float(enInit), float(enFin), float(enStep), dtype=float)
if EnT.shape[0] == En.shape[0]:
print(' Number of points in the learning dataset: ' + str(EnT.shape[0]))
else:
print('\033[1m' + ' Mismatch in datapoints: ' + str(EnT.shape[0]) + '; sample = ' + str(En.shape[0]) + '\033[0m')
# Interpolate to new axis
R = np.interp(EnT, En, R, left = R[0], right = 0)
# Renormalize offset by min R
R = R - np.amin(R) + 1e-8
# Renormalize to max of R
R = R/np.amax(R)
R = np.around(R, decimals=decimals)
index += 1
'''
try:
convertFile = os.path.splitext(file)[0] + '_ASCII.txt'
convertR = np.transpose(np.vstack((EnT, R)))
with open(convertFile, 'ab') as f:
np.savetxt(f, convertR, delimiter='\t', fmt='%10.6f')
except:
pass
'''
label = re.search('(.+?)__',file).group(1)
with open(summaryPlotFile, "a") as sum_file:
sum_file.write(str(index) + ',,,' + label + ','+file+'\n')
plt.plot(EnT,R,label=label)
#except:
# print("\n Skipping: ",file)
plt.xlabel('Raman shift [1/cm]')
plt.ylabel('Raman Intensity [arb. units]')
plt.legend(loc='upper left')
plt.savefig(plotFile+".png", dpi = 160, format = 'png') # Save plot
plt.show()
plt.close()
#************************************
''' Main initialization routine '''
#************************************
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
jskDr/jamespy_py3 | jmultidk.py | 1 | 17925 | # Python 3
"""
Codes for MultiDK are included.
Author: (James) Sung-Jin Kim
Date: April 2, 2016 ~ Now
Editorial notes
* April 2, 2016
I will use pd.DataFrame as the base class to make a new class for R2_DF,
which is specialized for r^2 scores and supports MultiDK.
Hence, it can be copied to jpandas later on.
"""
from time import time
import pandas as pd
from functools import reduce
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Jan 27, 2017
# cross_validation is deprecated and has been replaced by model_selection
from sklearn import model_selection #cross_validation
from sklearn import metrics
import tensorflow.contrib.learn as skflow
import jpandas as jpd
import jchem, jgrid
import j3x.jpyx
import jseaborn as jsns
import kutil
def list_agg_n( n_folds):
"""
Generate a function to aggregate multiple lists
for functools.reduce()
"""
f = lambda s, x: s + [x] * n_folds
return f
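# Example (illustrative): functools.reduce(list_agg_n(2), range(3), [])
# returns [0, 0, 1, 1, 2, 2], i.e. every element repeated n_folds times.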
class R2_Scores( object):
def __init__(self, fname = None, Nm = 10, n_alphas = 10, n_folds = 20):
"""
Make a class for r^2 scores based on pd.DataFrame
[Input]
fname: file name for r^2 dataframe
Nm: the number of methods
E.g.
fname = "sheet/wang3705_MDMK2to23_{}methods.csv".format(Nm)
"""
if fname is not None:
self.fname = fname
self.df = pd.read_csv( fname)
else:
self.df = pd.DataFrame()
self.Nm = Nm
self.n_alphas = n_alphas
self.n_folds = n_folds
def updata_for_multidk(self, fname_out = None):
"""
        1. alpha_ID is added to the DataFrame since floating-point values such as alpha
        cannot be found by value. An index of the best alpha value for each method
will be stored as well as the best alpha so that the index will be used
to filter the best values of mean(r2) and std(r2).
"""
n_alphas = self.n_alphas
n_folds = self.n_folds
# Step 1: adding alpha_ID
self.df["alpha_ID"] = reduce( list_agg_n( n_folds), range( n_alphas), []) * self.Nm
# Step 2: change names MDMKx to MultiDKx
rn_d = {'MDMK2to11':"MultiDK2-11", 'MDMK2to21': "MultiDK2-21",
'MDMK2to23': "MultiDK2-23", 'MDMK2to13': "MultiDK2-13", "MDMK1to10":"MultiDK1-10",
"MDMK": "MultiDK"} # MDMK is included for legacy cases such as redox potential prediction
df_method_l = self.df["Method"].tolist()
rn_l = []
for m in df_method_l:
if m in rn_d.keys():
rn_l.append( rn_d[m])
else:
rn_l.append( m)
self.df["Method"] = rn_l
if fname_out is not None:
self.df.to_csv( fname_out, index = False)
elif self.fname is not None:
fname_out = self.fname[:-4] + '_refine.csv'
print( "Default: self.df is saved to", fname_out)
self.df.to_csv( fname_out, index = False)
return self.df
def mean_std(self, fname_out = None):
df_g = self.df.groupby(["Method", "alpha"])
self.df_gr = df_g.agg({"r2":[np.mean, np.std]})["r2"]
# Index should be stored.
if fname_out is not None:
"""
            The index should be saved, so keep 'index = True' (the default).
"""
self.df_gr.to_csv( fname_out)
elif self.fname is not None:
fname_out = self.fname[:-4] + '_mean_std.csv'
print( "Default: self.df_gr is saved to", fname_out)
self.df_gr.to_csv( fname_out)
return self.df_gr
def max_mean_r2( self, fname_out = None):
"""
        Extract all method names and keep only the unique names as a set
"""
self.method_l = set(self.df_gr.index.get_level_values(0))
pdi_l = list()
for m in self.method_l:
p_m = self.df_gr.loc[ m]
alpha_l = p_m.index.tolist()
m_r2 = p_m["mean"].values
std_r2 = p_m["std"].values
i_max = m_r2.argmax()
pdi = pd.DataFrame( [[m, i_max, alpha_l[i_max], m_r2[i_max], std_r2[i_max]]],
columns=["Method", "best_alpha_ID", "best_alpha", "E[r2]", "std(r2)"])
pdi_l.append( pdi)
pdo_best = pd.concat( pdi_l, ignore_index=True).sort_values("Method")
self.df_best = pdo_best.set_index("Method")
if fname_out is not None:
self.df_best.to_csv( fname_out) # index should be stored.
elif self.fname is not None:
fname_out = self.fname[:-4] + '_best4bar.csv'
print( 'Default: self.df_best is saved to', fname_out)
self.df_best.to_csv( fname_out) # index should be stored.
return self.df_best
def get_box_data( self, fname_out = None):
"""
DataFrame is arranged for box plot.
"""
pdo = self.df
cond = None
for m in self.method_l:
best_alpha_ID = self.df_best.loc[ m]["best_alpha_ID"]
if cond is None:
cond = (pdo.Method == m) & (pdo.alpha_ID == best_alpha_ID)
else:
cond |= (pdo.Method == m) & (pdo.alpha_ID == best_alpha_ID)
self.df_best_expand = self.df[ cond].reset_index( drop = True)
if fname_out is not None:
self.df_best_expand.to_csv( fname_out) # index should be stored.
elif self.fname is not None:
fname_out = self.fname[:-4] + '_best4box.csv'
print( 'Default: self.df_best_expand is saved to', fname_out)
self.df_best_expand.to_csv( fname_out) # index should be stored.
return self.df_best_expand
def run( self):
self.updata_for_multidk()
self.mean_std()
self.max_mean_r2()
self.get_box_data()
return self.df_best, self.df_best_expand
def plot_bar( self, fname_out = None):
self.df_best.plot( kind = 'bar', y = "E[r2]", yerr="std(r2)", legend=False)
if fname_out is not None:
plt.savefig( fname_out) # index should be stored.
elif self.fname is not None:
fname_out = self.fname[:-4] + '_bar.eps'
print( 'Default: the figure of self.df_best is saved to', fname_out)
plt.savefig( fname_out)
def plot_box( self, fname_out = None):
sns.boxplot(x="Method", y="r2", data=self.df_best_expand, palette="PRGn")
sns.despine(offset=10, trim=True)
plt.ylabel( r"$r^2$")
plt.xlabel( "Methods")
if fname_out is not None:
plt.savefig( fname_out) # index should be stored.
elif self.fname is not None:
fname_out = self.fname[:-4] + '_box.eps'
print( 'Default: the figure of self.df_best_expand is saved to', fname_out)
plt.savefig( fname_out)
def run_with_plot( self):
self.run()
self.plot_bar()
plt.show()
self.plot_box()
plt.show()
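# Illustrative usage sketch (the CSV path and method count below are
# assumptions, not taken from this module):
#     r2s = R2_Scores("sheet/wang3705_MDMK2to23_8methods.csv", Nm=8)
#     df_best, df_best_expand = r2s.run()
#     r2s.plot_bar(); r2s.plot_box()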
def set_X_23( s_l, xM_logP):
# s_l = self.s_l
# xM_logP = self.xM_logP
# Body
xM_d = dict()
xM_d["MFP"] = jchem.get_xM( s_l, radius=4, nBits=2048)
xM_d["MACCS"] = jchem.get_xM_MACCSkeys( s_l)
xM_d["MolW"] = jchem.get_xM_molw( s_l)
xM_d["LASA"] = jchem.get_xM_lasa( s_l)
xM_d["logP"] = xM_logP
for d_s in ["MolW", "LASA", "logP"]:
# x_a = xM_d[ d_s]
# x_a = np.divide( x_a, np.std( x_a, axis = 0)) # Normalize
xM_d[ d_s] = np.divide( xM_d[ d_s], np.std( xM_d[ d_s], axis = 0))
xM_2 = np.concatenate( [xM_d["MFP"], xM_d["MACCS"]], axis = 1)
xM_p = np.concatenate( [xM_d[ d_s] for d_s in ["MolW", "LASA", "logP"]], axis = 1) # Concatenation of associated properties
print( xM_2.shape, xM_p.shape)
# Output processing
#self.xM_d = xM_d
#self.xM_2 = xM_2
#self.xM_p = xM_p
return xM_d, xM_2, xM_p
def set_X_23_M2( s_l, xM_logP):
# s_l = self.s_l
# xM_logP = self.xM_logP
# Body
xM_d = dict()
xM_d["MFP"] = jchem.get_xM( s_l, radius=4, nBits=2048)
xM_d["MACCS"] = jchem.get_xM_MACCSkeys( s_l)
xM_d["MolW"] = jchem.get_xM_molw( s_l)
xM_d["MolW2"] = np.power( jchem.get_xM_molw( s_l), 2)
xM_d["LASA"] = jchem.get_xM_lasa( s_l)
xM_d["LASA2"] = jchem.get_xM_lasa( s_l)
xM_d["logP"] = xM_logP
for d_s in ["MolW", "MolW2", "LASA", "LASA2", "logP"]:
xM_d[ d_s] = np.divide( xM_d[ d_s], np.std( xM_d[ d_s], axis = 0))
xM_2 = np.concatenate( [xM_d["MFP"], xM_d["MACCS"]], axis = 1)
xM_p = np.concatenate( [xM_d[ d_s] for d_s in ["MolW", "LASA", "logP"]], axis = 1) # Concatenation of associated properties
print( xM_2.shape, xM_p.shape)
# Output processing
#self.xM_d = xM_d
#self.xM_2 = xM_2
#self.xM_p = xM_p
return xM_d, xM_2, xM_p
def set_A_2( xM_d, xM_2):
# Input processing
#xM_d = self.xM_d
#xM_2 = self.xM_2
# Body
A_d = dict()
for key in ["MFP", "MACCS"]:
print( key)
A_d[ key] = j3x.jpyx.calc_tm_sim_M( xM_d[key])
A_2 = j3x.jpyx.calc_tm_sim_M( xM_2)
# Output processing
#self.A_d = A_d
#self.A_2 = A_2
return A_d, A_2
def set_alpha_log( a_st = -2, a_ed = 2, a_n = 2):
"""
Generate alpha_log with a range from a_st to s_ed
with a_n for each unit.
"""
a_N = (a_ed - a_st)*a_n + 1
return (a_st, a_ed, a_N)
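# Example (illustrative): set_alpha_log(-2, 2, 2) returns (-2, 2, 9), i.e. the
# exponent range -2..2 sampled twice per unit plus the endpoint.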
class MultiDK():
def __init__(self, fname = 'sheet/wang3705_with_logP.csv'):
self.fname_core = fname[:-14]
self.pdr = pd.read_csv( fname)
self.alphas_log = set_alpha_log( -2, 2, 2)
def set_xy(self):
"""
        self is returned even when it is modified in place, so that all
        outputs can be fed back through a single variable.
"""
pdr = self.pdr
self.s_l = self.pdr.SMILES.tolist()
self.xM_logP = np.mat(self.pdr.logP.values).T
self.yV = jpd.pd_get_yV( self.pdr, y_id="exp")
return self
def set_X( self):
# Input processing
s_l = self.s_l
xM_logP = self.xM_logP
        # Body
xM_d, xM_2, xM_p = set_X_23( s_l, xM_logP)
# Output processing
self.xM_d = xM_d
self.xM_2 = xM_2
self.xM_p = xM_p
return self
def set_X_M2( self):
# Input processing
s_l = self.s_l
xM_logP = self.xM_logP
        # Body
xM_d, xM_2, xM_p = set_X_23_M2( s_l, xM_logP)
# Output processing
self.xM_d = xM_d
self.xM_2 = xM_2
self.xM_p = xM_p
return self
def set_A( self):
# Input processing
xM_d = self.xM_d
xM_2 = self.xM_2
# Body
A_d, A_2 = set_A_2( xM_d, xM_2)
# Output processing
self.A_d = A_d
self.A_2 = A_2
return self
def grid_search_sd( self):
# input processing
xM_d = self.xM_d
xM_p = self.xM_p
yV = self.yV
A_d = self.A_d
A_2 = self.A_2
#Body
t = time()
pdi_d = dict()
pdi_d["SD"] = jsns.pdi_gs_full( "SD", [xM_d["MFP"]], yV, expension = True, n_jobs = 1)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MD21"] = jsns.pdi_gs_full( "MD21", [xM_d[ d_s] for d_s in ["MFP", "MACCS", "MolW"]], yV,
expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MD23"] = jsns.pdi_gs_full( "MD23", list(xM_d.values()), yV, expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK1to10"] = jsns.pdi_gs_full( "MDMK1to10", [A_d["MFP"]], yV,
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to11"] = jsns.pdi_gs_full( "MDMK2to11", [A_2], yV, X_concat = xM_d["MolW"],
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to13"] = jsns.pdi_gs_full( "MDMK2to13", [A_2], yV, X_concat = xM_p,
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to21"] = jsns.pdi_gs_full( "MDMK2to21", [A_d["MFP"], A_d["MACCS"]], yV, X_concat = xM_d["MolW"],
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to23"] = jsns.pdi_gs_full( "MDMK2to23", [A_d["MFP"], A_d["MACCS"]], yV, X_concat = xM_p,
mode = "BIKE_Ridge", expension = True, n_jobs = 1)
        print('Elapsed time is', time() - t, 'sec')
pdo = pd.concat( pdi_d.values())
print( pdo.shape)
Nm = len(pdi_d)
#print( "The number of methods now is", Nm)
fname_out = self.fname_core + "_MDMK2to23_{}methods.csv".format(Nm)
        print("The performance data are saved to", fname_out)
pdo.to_csv( fname_out, index = False)
self.pdo = pdo
return self
def grid_search( self):
# input processing
xM_d = self.xM_d
xM_p = self.xM_p
yV = self.yV
A_d = self.A_d
A_2 = self.A_2
#Body
t = time()
pdi_d = dict()
for k in xM_d:
s = "SD({})".format( k)
pdi_d[s] = jsns.pdi_gs_full( s, [xM_d[k]], yV, expension = True, n_jobs = 1)
            print('Elapsed time is', time() - t, 'sec')
# pdi_d["SD(MFP)"] = jsns.pdi_gs_full( "SD(MFP)", [xM_d["MFP"]], yV, expension = True, n_jobs = 1)
# print('Elasped time is', time() - t, 'sec')
# pdi_d["SD(MACCS)"] = jsns.pdi_gs_full( "SD(MACCS)", [xM_d["MACCS"]], yV, expension = True, n_jobs = 1)
# print('Elasped time is', time() - t, 'sec')
# pdi_d["SD(MolW)"] = jsns.pdi_gs_full( "SD(MolW)", [xM_d["MolW"]], yV, expension = True, n_jobs = 1)
# print('Elasped time is', time() - t, 'sec')
# pdi_d["SD(LASA)"] = jsns.pdi_gs_full( "SD(LASA)", [xM_d["MolW"]], yV, expension = True, n_jobs = 1)
# print('Elasped time is', time() - t, 'sec')
# pdi_d["SD(logP)"] = jsns.pdi_gs_full( "SD(logP)", [xM_d["logP"]], yV, expension = True, n_jobs = 1)
# print('Elasped time is', time() - t, 'sec')
pdi_d["MD21"] = jsns.pdi_gs_full( "MD21", [xM_d[ d_s] for d_s in ["MFP", "MACCS", "MolW"]], yV,
expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MD23"] = jsns.pdi_gs_full( "MD23", list(xM_d.values()), yV, expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK1to10"] = jsns.pdi_gs_full( "MDMK1to10", [A_d["MFP"]], yV,
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to11"] = jsns.pdi_gs_full( "MDMK2to11", [A_2], yV, X_concat = xM_d["MolW"],
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to13"] = jsns.pdi_gs_full( "MDMK2to13", [A_2], yV, X_concat = xM_p,
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to21"] = jsns.pdi_gs_full( "MDMK2to21", [A_d["MFP"], A_d["MACCS"]], yV, X_concat = xM_d["MolW"],
mode = "BIKE_Ridge", expension = True)
        print('Elapsed time is', time() - t, 'sec')
pdi_d["MDMK2to23"] = jsns.pdi_gs_full( "MDMK2to23", [A_d["MFP"], A_d["MACCS"]], yV, X_concat = xM_p,
mode = "BIKE_Ridge", expension = True, n_jobs = 1)
        print('Elapsed time is', time() - t, 'sec')
pdo = pd.concat( pdi_d.values())
print( pdo.shape)
Nm = len(pdi_d)
#print( "The number of methods now is", Nm)
fname_out = self.fname_core + "_MDMK2to23_{}methods.csv".format(Nm)
        print("The performance data are saved to", fname_out)
pdo.to_csv( fname_out, index = False)
self.pdo = pdo
return self
def cv_MultiDK23( self, alpha, n_jobs = 1):
"""
        Returns
        -------
        yV_pred : np.array, mostly 1D
            The cross-validated prediction results.
"""
self.set_xy()
self.set_X()
self.set_A()
xM_d = self.xM_d
xM_p = self.xM_p
yV = self.yV
# A_d = self.A_d
A_2 = self.A_2
#Body
t = time()
yV_pred = jgrid.cv_BIKE_Ridge( [A_2], yV, alpha = alpha, XX = xM_p, n_folds = 20, n_jobs = n_jobs, grid_std = None)
        print('Elapsed time is', time() - t, 'sec')
return yV_pred
def plot( self):
sns.tsplot(data=self.pdo, time="alpha", unit="unit", condition="Method", value="r2")
plt.xscale('log')
plt.ylabel( r'$r^2$')
def _run_r0( self):
self.set_xy()
self.set_X()
self.set_A()
self.grid_search()
self.plot()
def run( self, SDx = False):
self.set_xy()
self.set_X()
self.set_A()
if SDx:
self.grid_search()
else:
self.grid_search_sd()
self.plot()
return self
def set_XA( self):
self.set_xy()
self.set_X()
self.set_A()
return self
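# Illustrative end-to-end sketch (not part of the original module); the CSV
# path below is simply the default assumed by MultiDK.__init__.
def _demo_multidk(fname='sheet/wang3705_with_logP.csv'):
    """Build descriptors and kernels, grid-search all methods, and plot."""
    mdk = MultiDK(fname=fname)
    mdk.run(SDx=True)
    return mdk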
#############################
# April 24, 2016
#############################
class MultiDK_DL( MultiDK):
"""
Deep learning version of MultiDK
Kernels are, however, not applied.
"""
def __init__( self, fname = 'sheet/wang3310_with_logP.csv', graph = False):
"""
alpha_l is fixed now.
"""
super().__init__( fname = fname)
self.graph = graph
def learning( self):
X = self.X
y = self.y
print( "Shape of X and y are", X.shape, y.shape)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y,
test_size=0.2, random_state=42)
X_train, X_val, y_train, y_val = model_selection.train_test_split(X_train, y_train,
test_size=0.2, random_state=42)
val_monitor = skflow.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=200)
model = skflow.TensorFlowDNNRegressor(hidden_units=[100, 50, 10], steps=5000)
model.fit(X_train, y_train, val_monitor)
yP = model.predict(X_test)
score_r2 = metrics.r2_score(y_test, yP)
score_MedAE = metrics.median_absolute_error(y_test, yP)
print('Accuracy')
print('--------')
print('R2: {0:f}, MedAE: {1:f}'.format(score_r2, score_MedAE))
if self.graph:
kutil.regress_show4( y_test, yP)
def _set_X_r0( self):
super().set_X()
print( [self.xM_d[key].shape for key in self.xM_d])
self.X = np.array( np.concatenate( list( self.xM_d.values()), axis = 1))
self.y = np.array( self.yV)
def set_X( self, ds_mode = "11111"):
"""
        Combine only the descriptors selected by ds_mode.
        Each bit in the bit string indicates whether the corresponding
        descriptor is included or not.
        By default ("11111"), all descriptors are aggregated.
"""
self.set_X_M2()
#mode_l = [int(x) for x in ds_mode]
mode_key = ["MFP", "MACCS", "MolW2", "LASA", "logP"]
assert len(mode_key) == len(ds_mode)
X_l = list()
for idx in range( len(ds_mode)):
if ds_mode[ idx] == '1':
X_l.append( self.xM_d[ mode_key[ idx]])
self.X = np.array( np.concatenate( X_l, axis = 1))
self.y = np.array( self.yV)[:,0]
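        # Example (illustrative): ds_mode="00111" keeps only MolW2, LASA and
        # logP out of the key order ["MFP", "MACCS", "MolW2", "LASA", "logP"].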
def run_mode( self, ds_mode):
self.set_X( ds_mode)
self.learning()
def run( self):
self.set_xy()
ds_mode_l = ["00111", "00110", "00101"]
for ds_mode in ds_mode_l:
print()
print("======================")
print("ds_mode:", ds_mode)
self.run_mode( ds_mode)
return self
#############################
# August 1, 2016
#############################
class MultiDK_DLA( MultiDK): # Under development
"""
    Use set_A appropriately.
    I changed the base class to MultiDK instead of MultiDK_DL
    since MultiDK is more similar to the target class.
"""
def __init__( self, fname = 'sheet/wang3310_with_logP.csv'):
"""
        - A (the kernel matrices) will be used instead of the raw feature matrix X
- Need to change learning since training and testing should be
differently calculated for A.
"""
super().__init__( fname = fname)
def set_data(self):
self.set_xy()
self.set_X()
self.set_A()
| mit |
SKA-ScienceDataProcessor/algorithm-reference-library | tests/processing_components/test_pointing.py | 1 | 11233 | """ Unit tests for pointing
"""
import logging
import unittest
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from processing_components.calibration.pointing import create_pointingtable_from_blockvisibility
from processing_components.imaging.primary_beams import create_vp
from processing_components.simulation.configurations import create_named_configuration
from processing_components.simulation.pointing import simulate_gaintable_from_pointingtable
from processing_components.simulation.testing_support import create_test_image, simulate_pointingtable
from processing_components.simulation.testing_support import create_test_skycomponents_from_s3
from processing_components.skycomponent.operations import create_skycomponent
from processing_components.visibility.base import create_blockvisibility
from processing_library.image.operations import create_image
log = logging.getLogger(__name__)
class TestPointing(unittest.TestCase):
def setUp(self):
from data_models.parameters import arl_path
self.doplot = True
self.midcore = create_named_configuration('MID', rmax=300.0)
self.nants = len(self.midcore.names)
self.dir = arl_path('test_results')
self.ntimes = 300
self.times = numpy.linspace(-12.0, 12.0, self.ntimes) * numpy.pi / (12.0)
self.frequency = numpy.array([1e9])
self.channel_bandwidth = numpy.array([1e7])
self.phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
self.vis = create_blockvisibility(self.midcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre, weight=1.0,
polarisation_frame=PolarisationFrame('stokesI'))
self.vis.data['vis'] *= 0.0
# Create model
self.model = create_image(npixel=512, cellsize=0.00015, polarisation_frame=PolarisationFrame("stokesI"),
frequency=self.frequency, channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre)
def test_create_pointingtable(self):
beam = create_test_image(cellsize=0.0015, phasecentre=self.vis.phasecentre,
frequency=self.frequency)
for telescope in ['MID', 'LOW', 'ASKAP']:
vp = create_vp(beam, telescope)
pt = create_pointingtable_from_blockvisibility(self.vis, vp)
pt = simulate_pointingtable(pt, 0.1, static_pointing_error=[0.01, 0.001])
assert pt.pointing.shape == (self.ntimes, self.nants, 1, 1, 2), pt.pointing.shape
def test_create_gaintable_from_pointingtable(self):
s3_components = create_test_skycomponents_from_s3(flux_limit=5.0,
phasecentre=self.phasecentre,
frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'),
radius=0.2)
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.01, static_pointing_error=[0.001,0.0001])
vp = create_vp(self.model, 'MID')
gt = simulate_gaintable_from_pointingtable(self.vis, s3_components, pt, vp)
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
def test_create_gaintable_from_pointingtable_dynamic(self):
comp = create_skycomponent(direction=self.phasecentre, flux=[[1.0]], frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'))
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.01, static_pointing_error=None,
global_pointing_error=[0.0, 0.0])
vp = create_vp(self.model, 'MID')
gt = simulate_gaintable_from_pointingtable(self.vis, [comp], pt, vp)
if self.doplot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(gt[0].time, numpy.real(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.plot(gt[0].time, numpy.imag(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.title('test_create_gaintable_from_pointingtable_dynamic')
plt.show()
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
def test_create_gaintable_from_pointingtable_dynamic_radec(self):
comp = create_skycomponent(direction=self.phasecentre, flux=[[1.0]], frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'))
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.01, static_pointing_error=None,
global_pointing_error=[0.0, 0.0])
vp = create_vp(self.model, 'MID', use_local=False)
gt = simulate_gaintable_from_pointingtable(self.vis, [comp], pt, vp, use_radec=True)
if self.doplot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(gt[0].time, numpy.real(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.plot(gt[0].time, numpy.imag(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.title('test_create_gaintable_from_pointingtable_dynamic_radec')
plt.show()
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
def test_create_gaintable_from_pointingtable_static(self):
comp = create_skycomponent(direction=self.phasecentre, flux=[[1.0]], frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'))
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.0, static_pointing_error=[0.01, 0.001],
global_pointing_error=[0.0, 0.0])
vp = create_vp(self.model, 'MID')
gt = simulate_gaintable_from_pointingtable(self.vis, [comp], pt, vp)
if self.doplot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(gt[0].time, numpy.real(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.plot(gt[0].time, numpy.imag(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.title('test_create_gaintable_from_pointingtable_static')
plt.show()
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
def test_create_gaintable_from_pointingtable_dynamic_static(self):
comp = create_skycomponent(direction=self.phasecentre, flux=[[1.0]], frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'))
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.01, static_pointing_error=[0.01, 0.001],
global_pointing_error=[0.0, 0.0])
vp = create_vp(self.model, 'MID')
gt = simulate_gaintable_from_pointingtable(self.vis, [comp], pt, vp)
if self.doplot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(gt[0].time, numpy.real(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.plot(gt[0].time, numpy.imag(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.title('test_create_gaintable_from_pointingtable_dynamic_static')
plt.show()
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
def test_create_gaintable_from_pointingtable_global(self):
comp = create_skycomponent(direction=self.phasecentre, flux=[[1.0]], frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'))
import matplotlib.pyplot as plt
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.0, static_pointing_error=None,
global_pointing_error=[0.0, 0.01])
vp = create_vp(self.model, 'MID')
gt = simulate_gaintable_from_pointingtable(self.vis, [comp], pt, vp)
if self.doplot:
plt.clf()
plt.plot(gt[0].time, numpy.real(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.plot(gt[0].time, numpy.imag(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.title('test_create_gaintable_from_pointingtable_global')
plt.show()
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
def test_create_gaintable_from_pointingtable_global_dynamic(self):
comp = create_skycomponent(direction=self.phasecentre, flux=[[1.0]], frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'))
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.01, static_pointing_error=None,
global_pointing_error=[0.0, 0.01])
vp = create_vp(self.model, 'MID')
gt = simulate_gaintable_from_pointingtable(self.vis, [comp], pt, vp)
if self.doplot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(gt[0].time, numpy.real(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.plot(gt[0].time, numpy.imag(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.title('test_create_gaintable_from_pointingtable_global_dynamic')
plt.show()
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
def test_create_gaintable_from_pointingtable_GRASP(self):
comp = create_skycomponent(direction=self.phasecentre, flux=[[1.0]], frequency=self.frequency,
polarisation_frame=PolarisationFrame('stokesI'))
pt = create_pointingtable_from_blockvisibility(self.vis)
pt = simulate_pointingtable(pt, pointing_error=0.0, static_pointing_error=None,
global_pointing_error=[0.0, 0.01])
vp = create_vp(self.model, 'MID_GRASP')
gt = simulate_gaintable_from_pointingtable(self.vis, [comp], pt, vp)
if self.doplot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(gt[0].time, numpy.real(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.plot(gt[0].time, numpy.imag(1.0 / gt[0].gain[:, 0, 0, 0, 0]), '.')
plt.title('test_create_gaintable_from_pointingtable_global_dynamic')
plt.show()
assert gt[0].gain.shape == (self.ntimes, self.nants, 1, 1, 1), gt[0].gain.shape
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
quevedin/python-tools | pyomotools.py | 2 | 2864 | """ pyomotools: common helper functions for pyomo model creation """
from datetime import datetime
import pandas as pd
import xlrd
__all__ = ["now", "read_xls"]
def now(mydateformat='%Y%m%dT%H%M%S'):
""" Return current datetime as string.
Just a shorthand to abbreviate the common task to obtain the current
datetime as a string, e.g. for result versioning.
Args:
mydateformat: optional format string (default: '%Y%m%dT%H%M%S')
Returns:
datetime.now(), formated to string with argument mydateformat, e.g.
YYYYMMDDThhmmss ==> 20131007H123456
"""
return datetime.now().strftime(mydateformat)
def read_xls(filename, sheets=[]):
""" Convert Excel file to dict of pandas DataFrames.
Parses all spreadsheets within an Excel file using pandas.ExcelFile.parse,
if its top left cell is not empty. The first row is expected to contain
column titles. Titles starting with uppercase lettres are used as index
columns in the resulting DataFrame. Here is a short example summarizing
these specifications:
Process CoIn CoOut | cap eff ... avail
-------------------------------------------
PP Coal Elec | 100 0.90 ... 24
WT Wind Elec | 300 0.95 ... 10
PV Solar Elec | 200 0.92 ... 8
A spreadsheet is skipped if a) it is completely empty or b) has an empty
first row.
Args:
filename: an Excel spreadsheet filename
Returns:
dict of pandas DataFrames with sheet names as keys
"""
dfs = {}
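    # Usage sketch (hypothetical workbook and sheet names):
    #     data = read_xls('model_input.xlsx', sheets=['Process'])
    #     process_df = data['Process']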
xls = pd.ExcelFile(filename)
for sheet in xls.book.sheets():
# skip sheet if list of sheets was specified
if sheets and sheet.name not in sheets:
continue
# extract the sheet's first row to check for emptiness
first_row = sheet.row_slice(0)
# skip a spreadsheet if completely empty or its first cell is blank
if not first_row \
or first_row[0].ctype in (xlrd.XL_CELL_BLANK, xlrd.XL_CELL_EMPTY):
continue
# otherwise determine column numbers of titles starting with an
        # uppercase letter while skipping empty columns
uppercase_columns = [k for k, column_title in enumerate(first_row)
if column_title.value
and column_title.value[0].isupper()]
# parse those columns to a pandas DataFrame
df = xls.parse(sheet.name, index_col=uppercase_columns)
# and prune any columns with only NaN values
# these are mainly empty columns
dfs[sheet.name] = df.dropna(axis=1, how='all')
return dfs | gpl-3.0 |
probml/pyprobml | scripts/rvm_regression_1d.py | 1 | 4503 | """
This code is a python version of
https://github.com/probml/pmtk3/blob/master/demos/svmRegrDemo.m
This file demos RBF kernel regressors (L1reg, L2reg, SVM, RVM) on noisy sinc data.
Author: Srikar Reddy Jilugu (@always-newbie161)
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn import linear_model
from sklearn.svm import SVR
from cycler import cycler
from sklearn.kernel_approximation import RBFSampler
from sklearn.model_selection import cross_val_score
from rvm_regressor import RelevanceVectorRegressor as RVR
from sklearn.gaussian_process.kernels import RBF
import pyprobml_utils as pml
def main():
#CB_color = ['#377eb8', '#ff7f00', '#4daf4a']
#cb_cycler = (cycler(linestyle=['-', '--', '-.']) * cycler(color=CB_color))
#plt.rc('axes', prop_cycle=cb_cycler)
# -------------------------------------------
# making the data
np.random.seed(0)
N = 100
x = 10 * (np.linspace(-1, 1, 100).reshape(-1, 1))
ytrue = np.array([math.sin(abs(el)) / (abs(el)) for el in x]).reshape(-1, 1)
noise = 0.1
y = ytrue + noise * np.random.randn(N, 1)
X = (x - x.mean()) / x.std() # normalizing.
lambd_l2 = 0.1 # regularization parameter for L2reg
lambd_l1 = 1e-3 # regularization parameter for L1reg
rbf_scale = 0.3
gamma = 1 / (2 * rbf_scale ** 2)
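    # The RBF kernel is k(x, x') = exp(-gamma * ||x - x'||^2), so setting
    # gamma = 1 / (2 * rbf_scale**2) corresponds to a kernel length-scale
    # (standard deviation) of rbf_scale = 0.3.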
xtest = np.arange(-10, 10.1, 0.1)
Xtest = (xtest - xtest.mean()) / xtest.std()
Xtest = Xtest.reshape((-1, 1))
# applying the rbf kernel feature scaling
rbf_features = RBFSampler(gamma=gamma, random_state=1)
rbf_X = rbf_features.fit_transform(X)
    rbf_Xtest = rbf_features.transform(Xtest)  # reuse the mapping fitted on X
# -------------------------------------------
# l2
reg = linear_model.Ridge(alpha=lambd_l2, fit_intercept=False).fit(rbf_X, y)
ypred = reg.predict(rbf_Xtest)
plt.figure()
plt.plot(X, y, '*')
plt.plot(Xtest, ypred, '-', color='blue')
plt.title('linregL2')
pml.savefig('rvm_data_l2.pdf')
plt.figure()
# stem plot of weight vectors.
plt.title('linregL2')
plt.stem(reg.coef_.ravel(), use_line_collection=True)
plt.tight_layout()
pml.savefig('rvm_stem_l2.pdf')
# -------------------------------------------
# l1
reg = linear_model.Lasso(alpha=lambd_l1, fit_intercept=False,tol=1e-3)
ypred = reg.fit(rbf_X, y).predict(rbf_Xtest)
plt.figure()
plt.plot(X, y, '*')
plt.plot(Xtest, ypred, '-', color='blue')
# coefficient vectors of l1reg
SV_idx = (np.abs(reg.coef_) > 1e-5)
plt.scatter(X[SV_idx], y[SV_idx], s=200, facecolor="none",edgecolor='red')
plt.title('linregL1')
pml.savefig('rvm_data_l1.pdf')
plt.figure()
# stem plot of weight vectors.
plt.title('linregL1')
plt.stem(reg.coef_.ravel(), use_line_collection=True)
plt.tight_layout()
pml.savefig('rvm_stem_l1.pdf')
# -------------------------------------------
# RVR
kernel = RBF(0.3)
reg = RVR(kernel=kernel)
reg.fit(X, y.ravel())
ypred = reg.predict(Xtest)[0]
plt.figure()
plt.plot(X, y, '*')
plt.plot(Xtest, ypred, '-', color='blue')
# support vectors of RVR
plt.scatter(reg.X, reg.t, s=200, facecolor="none",edgecolor='red')
plt.title('RVM')
plt.tight_layout()
pml.savefig('rvm_data_rvm.pdf')
plt.figure()
# stem plot of weight vectors.
plt.title('RVM')
plt.stem(reg.mean.ravel(), use_line_collection=True)
plt.tight_layout()
pml.savefig('rvm_stem_rvm.pdf')
# -------------------------------------------
# SVM
C = np.arange(10)+1
crossval_scores = [cross_val_score(SVR(gamma=gamma, C=c),
X, y.ravel(), scoring='neg_mean_squared_error', cv=5).mean() for c in C]
    c_opt = C[np.argmax(crossval_scores)]  # neg-MSE: larger is better; map the index back to the C value
reg = SVR(gamma=gamma, C=c_opt)
reg.fit(X, y.ravel())
ypred = reg.predict(Xtest)
plt.figure()
plt.plot(X, y, '*')
plt.plot(Xtest, ypred, '-', color='blue')
# support vectors of SVR.
SV_idx = reg.support_
plt.scatter(X[SV_idx], y[SV_idx], s=200, facecolor="none",edgecolor='red')
plt.title('SVM')
plt.tight_layout()
pml.savefig('rvm_data_svm.pdf')
plt.figure()
# stem plot of weight vectors.
plt.title('SVM')
plt.stem(reg.dual_coef_.ravel(), use_line_collection=True)
plt.tight_layout()
pml.savefig('rvm_stem_svm.pdf')
# -------------------------------------------
plt.show()
if __name__ == "__main__":
main() | mit |
ZENGXH/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # Possible memory footprint reduction may be done here: in order
            # to avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted from Xk, we
            # suggest performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, slightly different from CCA. It is mostly used for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
relies in the fact that mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
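    Examples
    --------
    A minimal usage sketch; the toy arrays below are illustrative only:
    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    >>> plssvd = PLSSVD(n_components=2).fit(X, Y)
    >>> X_scores, Y_scores = plssvd.transform(X, Y)
    >>> X_scores.shape, Y_scores.shape
    ((4, 2), (4, 2))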
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
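# The reference implementation above follows the passive-aggressive update
# rules of Crammer et al. (2006), "Online Passive-Aggressive Algorithms":
# PA-I uses a step size tau = min(C, loss / ||x||^2), PA-II uses
# tau = loss / (||x||^2 + 1 / (2 * C)); the weights are then updated by
# tau * y * x for classification and tau * sign(y - p) * x for regression.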
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
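    # 'uniform' weights every neighbor equally, while 'distance' weights
    # neighbors by the inverse of their distance to the query point.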
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
fredrikw/scipy | doc/source/tutorial/examples/newton_krylov_preconditioning.py | 99 | 2489 | import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def get_preconditioner():
"""Compute the preconditioner M"""
diags_x = zeros((3, nx))
diags_x[0,:] = 1/hx/hx
diags_x[1,:] = -2/hx/hx
diags_x[2,:] = 1/hx/hx
Lx = spdiags(diags_x, [-1,0,1], nx, nx)
diags_y = zeros((3, ny))
diags_y[0,:] = 1/hy/hy
diags_y[1,:] = -2/hy/hy
diags_y[2,:] = 1/hy/hy
Ly = spdiags(diags_y, [-1,0,1], ny, ny)
J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)
# Now we have the matrix `J_1`. We need to find its inverse `M` --
# however, since an approximate inverse is enough, we can use
# the *incomplete LU* decomposition
J1_ilu = spilu(J1)
# This returns an object with a method .solve() that evaluates
# the corresponding matrix-vector product. We need to wrap it into
# a LinearOperator before it can be passed to the Krylov methods:
M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
return M
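# Illustrative sanity check (not part of the original example): the
# preconditioner is only ever used through matrix-vector products, so a
# quick way to exercise it is to apply it to a vector of the right length:
#
#     M = get_preconditioner()
#     print M.matvec(np.ones(nx*ny)).shape   # -> (nx*ny,)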
def solve(preconditioning=True):
"""Compute the solution"""
count = [0]
def residual(P):
count[0] += 1
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*cosh(P).mean()**2
# preconditioner
if preconditioning:
M = get_preconditioner()
else:
M = None
# solve
guess = zeros((nx, ny), float)
sol = root(residual, guess, method='krylov',
options={'disp': True,
'jac_options': {'inner_M': M}})
print 'Residual', abs(residual(sol.x)).max()
print 'Evaluations', count[0]
return sol.x
def main():
sol = solve(preconditioning=True)
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.clf()
plt.pcolor(x, y, sol)
plt.clim(0, 1)
plt.colorbar()
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
jblackburne/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
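# bin_seeding=True below seeds the search from a coarse grid of binned
# points instead of from every sample, which speeds up convergence on
# larger datasets at a small cost in accuracy.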
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
lbybee/HFRNNs | code/hf_test.py | 1 | 1606 | from rnn import MetaRNN
from hf import SequenceDataset, hf_optimizer
import numpy as np
import matplotlib.pyplot as plt
import logging
def HFTest(seq, targets, t_seq, t_targets, n_hidden=10, n_updates=250):
""" Test RNN with hessian free optimization """
n_in = 2
n_out = 2
n_classes = 10
# SequenceDataset wants a list of sequences
# this allows them to be different lengths, but here they're not
seq = [i for i in seq]
targets = [i for i in targets]
gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=500)
cg_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=100)
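    # In this Hessian-free setup the first dataset is typically used for the
    # gradient evaluation and the second for the conjugate-gradient /
    # curvature-product batches, which is why they use different batch counts.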
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh', output_type='softmax',
use_symbolic_softmax=True)
# optimizes negative log likelihood
# but also reports zero-one error
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
s=model.rnn.y_pred,
costs=[model.rnn.loss(model.y),
model.rnn.errors(model.y)], h=model.rnn.h)
mse_updates = []
for i in range(n_updates):
opt.train(gradient_dataset, cg_dataset, num_updates=1)
mse = 0
for t in range(len(t_seq)):
guess = model.predict_proba(t_seq[t])
            if guess != t_targets[t]:
mse += 1
mse_updates.append(mse)
print i
return (mse_updates, model)
| gpl-2.0 |
huaj1101/ML-PY | TensorFlow/C04-Autoencoder.py | 1 | 4172 | import os
import datetime
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Suppress some TensorFlow warnings (log level 3 shows errors only)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def xavier_init(fan_in, fan_out, constant=1):
low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)
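# Xavier/Glorot uniform initialization: weights are drawn from
# U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)), which keeps
# activation variances roughly constant across layers.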
def standard_scale(x_train, x_test):
preprocessor = prep.StandardScaler().fit(x_train)
x_train = preprocessor.transform(x_train)
x_test = preprocessor.transform(x_test)
return x_train, x_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
class AdditiveGaussianNoiseAutoencoder(object):
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
optimizer=tf.train.AdamOptimizer(), scale=0.1):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
self.training_scale = scale
self.weights = self._initialize_weights()
self.x = tf.placeholder(tf.float32, [None, self.n_input])
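        # Denoising forward pass: Gaussian noise (scaled by `scale`) is added
        # to the input before the softplus encoding layer; the decoder is
        # linear and the cost is 0.5 * sum((reconstruction - x)^2).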
self.hidden = self.transfer(
tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)), self.weights['w1']), self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden]), dtype=tf.float32)
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input]), dtype=tf.float32)
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input]), dtype=tf.float32)
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X, self.scale: self.training_scale})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict={self.x: X, self.scale: self.training_scale})
def generate(self, hidden=None):
if hidden is None:
hidden = np.random.normal(size=self.weights['b1'])
return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.scale: self.training_scale})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
x_train, x_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
train_epochs = 25
batch_size = 128
display_step = 1
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200, transfer_function=tf.nn.softplus,
optimizer=tf.train.AdamOptimizer(learning_rate=0.001), scale=0.01)
for epoch in range(train_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs = get_random_block_from_data(x_train, batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(x_test)))
| apache-2.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/neighbors/plot_nearest_centroid.py | 38 | 1817 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could avoid this ugly
# slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, .2]:
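    # shrink_threshold soft-thresholds each class centroid toward the overall
    # data centroid (nearest shrunken centroids), which can suppress noisy
    # features; None disables shrinking.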
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold,
edgecolor='b', s=20)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| gpl-3.0 |
thirtysix/TFBS_footprinting | tfbs_footprinter/tfbs_footprinter.py | 1 | 137880 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Python vers. 2.7.0 ###########################################################
__version__ = "1.0.0b53"
# Libraries ####################################################################
import sys
import signal
import wget
import tarfile
import argparse
import textwrap
import os
import json
import msgpack
import time
import csv
import logging
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio import AlignIO
import socket
import httplib2
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pylab import mpl
from numpy import random as numpyrandom
from decimal import Decimal
from operator import itemgetter
from bisect import bisect_left
from bisect import bisect_right
################################################################################
# Description ##################################################################
################################################################################
"""
Thresholds are allowed to be negative, as based on whole genome scoring.
Improvements to be made:
1) Change FANTOM CAGE correlation analysis to only include top CAGE peaks,
instead of all CAGES. Should reduce size and focus on most relevant CAGEs.
2) Account for GTRD metacluster peak count. Switch to GTRD peaks by TF if the
number of TFs available becomes larger than those offered by JASPAR. Similarly,
if all JASPAR TFs become present in GTRD database, then switch from metaclusters
to peaks by TF.
"""
################################################################################
# Functions ####################################################################
################################################################################
script_dir = os.path.dirname(__file__)
curdir = os.getcwd()
################################################################################
# Arguments ####################################################################
################################################################################
def get_args():
"""
Retrieve arguments provided by the user.
"""
# Instantiate the parser
parser = argparse.ArgumentParser(
prog="tfbs_footprinter",
formatter_class = argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
TFBS Footprinting - Identification of conserved vertebrate transcription factor binding sites (TFBSs).
See https://github.com/thirtysix/TFBS_footprinting for additional usage instructions.
------------------------------------------------------------------------------------------------------
Example Usage:
simplest:
tfbs_footprinter PATH_TO/sample_ensembl_ids.txt
all arguments:
tfbs_footprinter -t PATH_TO/sample_ensembl_ids.txt -tfs PATH_TO/sample_jaspar_tf_ids.txt -pb 900 -pa 100 -tx 10 -p 0.01 -update
run the sample analysis:
Option #1: tfbs_footprinter -t PATH_TO/sample_analysis/sample_analysis_list.csv
Option #2: tfbs_footprinter -t PATH_TO/sample_analysis/sample_ensembl_ids.txt
update the experimental data files (not needed often):
tfbs_footprinter -update
Results will be output to the current directory in a created directory named "tfbs_results"
------------------------------------------------------------------------------------------------------
"""))
# Arguments
parser.add_argument('--t_ids_file', '-t', metavar='', type=str,
help='Required for running an analysis. Location of a file containing Ensembl target_species transcript ids. Input options are either a text file of Ensembl transcript ids or a .csv file with individual values set for each parameter.')
parser.add_argument('--tf_ids_file', '-tfs', metavar='', type=str, default = None, help='Optional: Location of a file containing a limited list of Jaspar TFs to use in scoring alignment \
(see sample file tf_ids.txt at https://github.com/thirtysix/TFBS_footprinting) [default: all Jaspar TFs]')
parser.add_argument('--promoter_before_tss', '-pb', metavar='', choices = range(-10000, 100001), type=int, default=900,
                        help='(-10,000 to 100,000) [default: 900] - Number (integer) of nucleotides upstream of TSS to include in analysis. If this number is negative the start point will be downstream of the TSS, the end point will then need to be further downstream.')
parser.add_argument('--promoter_after_tss', '-pa', metavar='', choices = range(-10000, 100001), type=int, default=100,
                        help='(-10,000 to 100,000) [default: 100] - Number (integer) of nucleotides downstream of TSS to include in analysis. If this number is negative the end point will be upstream of the TSS. The start point will then need to be further upstream.')
parser.add_argument('--top_x_tfs', '-tx', metavar='', choices = range(1, 21), type=int, default=10,
help='(1-20) [default: 10] - Number (integer) of unique TFs to include in output .svg figure.')
## parser.add_argument('--output_dir', '-o', metavar='', type=str, default=os.path.join(curdir, "tfbs_results"),
## help=" ".join(['[default:', os.path.join(curdir, "tfbs_results"), '] - Full path of directory where result directories will be output. Make sure that the root directory already exists.']))
# for now pvalue refers to the PWM score, in the future it will need to relate to the combined affinity score
parser.add_argument('--pval', '-p', type=float, default=0.01, help='P-value (float) for PWM score cutoff (range: 1 (all results) to 0.0000001; in divisions of 10 (i.e. 1, 0.1, 0.01, 0.001 etc.) [default: 0.01]')
parser.add_argument('--exp_data_update', '-update', action="store_true", help='Download the latest experimental data files for use in analysis. Will run automatically if the "data" directory does not already exist (e.g. first usage).')
parser.add_argument('--nofig', '-no', action="store_true", help="Don't output a figure.")
# Functionality to add later
##parser.add_argument('--noclean', '-nc', action = 'store_true', help='Optional: Don't clean retrieved alignment. Off by default.')
# pre-processing the arguments
args = parser.parse_args()
args_lists = []
transcript_ids_filename = args.t_ids_file
exp_data_update = args.exp_data_update
if transcript_ids_filename:
filename, file_extension = os.path.splitext(transcript_ids_filename)
if file_extension == ".csv" or file_extension == ".tsv":
# If the user has provided a .csv file with the required parameters defined for each Ensembl transcript id
# this can be parsed to run unique analyses for each.
parsed_arg_lines = file_to_datalist(transcript_ids_filename)[1:]
for i, parsed_arg_line in enumerate(parsed_arg_lines):
if len(parsed_arg_line) < 6:
print("Incomplete arguments in input file on line", i)
else:
transcript_id, target_tfs_filename, promoter_before_tss, promoter_after_tss, top_x_tfs_count, pval = parsed_arg_line
# promoter_before_tss/promoter_after_tss
try:
promoter_before_tss = int(promoter_before_tss)
except:
print("Entered promoter before TSS", promoter_before_tss, "in line", i, "is not an integer. Defaulting to 900.")
promoter_before_tss = 900
try:
promoter_after_tss = int(promoter_after_tss)
except:
print("Entered promoter after TSS", promoter_after_tss, "in line", i, "is not an integer. Defaulting to 100.")
promoter_after_tss = 100
# top_x_tfs_count
try:
top_x_tfs_count = int(top_x_tfs_count)
except:
print("Entered top x tfs count", top_x_tfs_count, "in line", i, "is not an integer. Defaulting to 10.")
top_x_tfs_count = 10
# p-value
try:
pval = float(pval)
except:
print("Entered p-value threshold", pval, "in line", i, "is not float. Defaulting to 0.01.")
pval = 0.01
# update exp data
exp_data_update = False
parsed_cleaned_arg_line = [transcript_id, target_tfs_filename, promoter_before_tss, promoter_after_tss, top_x_tfs_count, pval]
args_lists.append([args, transcript_ids_filename] + parsed_cleaned_arg_line)
else:
# If the analysis does not require setting the parameters individually for each Ensembl transcript id then build
# build a list which has all of the parameters set as the same, in this way there can be a similar input format
# as a .tsv, and standardized handling in the rest of the analysis.
target_tfs_filename = args.tf_ids_file
promoter_before_tss = args.promoter_before_tss
promoter_after_tss = args.promoter_after_tss
top_x_tfs_count = args.top_x_tfs
pval = args.pval
exp_data_update = args.exp_data_update
nofigure = args.nofig
transcript_ids_list = parse_transcript_ids(transcript_ids_filename)
for transcript_id in transcript_ids_list:
args_list = [args, transcript_ids_filename, transcript_id, target_tfs_filename, promoter_before_tss, promoter_after_tss, top_x_tfs_count, pval]
args_lists.append(args_list)
return args_lists, exp_data_update, nofigure
################################################################################
# House-keeping functions ######################################################
################################################################################
def signal_handler(signal, frame):
print('You have manually stopped tfbs_footprinter with Ctrl+C')
sys.exit(0)
def load_json(filename):
if os.path.exists(filename):
with open(filename) as open_infile:
return json.load(open_infile)
else:
return None
def dump_json(filename, json_data):
with open(filename, 'w') as open_outfile:
json_data = json.dump(json_data, open_outfile)
def load_msgpack(object_filename):
"""unpack a msgpack file to object."""
if os.path.exists(object_filename):
with open(object_filename, 'rb') as object_file:
return msgpack.unpack(object_file, max_array_len=200000, use_list=False)
else:
return None
def save_msgpack(msgpack_obj, msgpack_filename):
"""Save msgpack object to file."""
with open(msgpack_filename, 'wb') as msgpack_file:
msgpack.pack(msgpack_obj, msgpack_file, use_bin_type=True)
def directory_creator(directory_name):
"""
Create directory if it does not already exist.
"""
if not os.path.isdir(directory_name):
os.mkdir(directory_name)
def is_online():
"""
Test if the system is online.
This breaks when TFBS_footprinter outlasts Google.
"""
REMOTE_SERVER = "www.google.com"
try:
host = socket.gethostbyname(REMOTE_SERVER)
s = socket.create_connection((host, 80), 2)
s.close()
return True
except:
logging.info(" ".join(["System does not appear to be connected to the internet."]))
return False
def ensemblrest(query_type, options, output_type, ensembl_id=None, log=False):
"""
Retrieve REST data from Ensembl using provided ID, query type, and options.
"""
http = httplib2.Http()
server = "http://rest.ensembl.org"
full_query = server + query_type + ensembl_id + options
if log:
logging.info(" ".join(["Ensembl REST query made:", full_query]))
success = False
try_count = 0
max_tries = 10
fail_sleep_time = 120
decoded_json = {}
fasta_content = []
if output_type == 'json':
while success == False and try_count < max_tries:
try:
resp, json_data = http.request(full_query, method="GET")
decoded_json = json.loads(json_data)
ensemblrest_rate(resp)
try_count += 1
success = True
return decoded_json
except:
logging.info(" ".join(["Ensembl REST query unsuccessful, attempt:", "/".join([str(try_count), str(max_tries)]), "Sleeping:", str(fail_sleep_time), "seconds.", full_query]))
try_count += 1
print(" ".join(["Ensembl REST query unsuccessful, attempt:", "/".join([str(try_count), str(max_tries)]), "Sleeping:", str(fail_sleep_time), "seconds.", "See logfile for query."]))
time.sleep(fail_sleep_time)
# return empty decoded_json if max tries has elapsed
return decoded_json
if output_type == 'fasta':
while success == False and try_count < max_tries:
try:
resp, fasta_content = http.request(full_query, method="GET", headers={"Content-Type":"text/x-fasta"})
ensemblrest_rate(resp)
try_count += 1
success = True
return fasta_content
except:
logging.info(" ".join(["Ensembl REST query unsuccessful, attempt:", "/".join([str(try_count), str(max_tries)]), "Sleeping:", str(fail_sleep_time), "seconds.", full_query]))
try_count += 1
print(" ".join(["Ensembl REST query unsuccessful, attempt:", "/".join([str(try_count), str(max_tries)]), "Sleeping:", str(fail_sleep_time), "seconds.", "See logfile for query."]))
time.sleep(fail_sleep_time)
# return empty fasta_content if max tries has elapsed
return fasta_content
def ensemblrest_rate(resp):
"""
Read Ensembl REST headers, determine whether the rate limit has been exceeded, and sleep if necessary.
"""
if int(resp['x-ratelimit-remaining']) == 0:
if 'Retry-After' in resp:
sleep_time = int(resp['Retry-After'])
logging.warning(" ".join(["Ensembl REST (Retry-After) requests sleeping for:", str(sleep_time)]))
time.sleep(sleep_time)
else:
sleep_time = 60
logging.warning(" ".join(["Ensembl REST requests sleeping for:", str(sleep_time)]))
time.sleep(sleep_time)
def parse_transcript_ids(transcript_ids_filename):
"""
If user has provided a file with Ensembl transcript ids, parse these to a list.
"""
with open(transcript_ids_filename, 'r') as transcript_ids_file:
transcript_ids_list = transcript_ids_file.read().splitlines()
transcript_ids_list = [x for x in transcript_ids_list if len(x)>0]
return transcript_ids_list
def parse_tf_ids(target_tfs_filename):
"""
If user has provided a file with JASPAR TF ids, parse these to a list.
"""
with open(target_tfs_filename, 'r') as target_tfs_file:
target_tfs_list = target_tfs_file.read().splitlines()
target_tfs_list = [x.upper() for x in target_tfs_list if len(x)>0]
return target_tfs_list
def file_to_datalist(data_filename):
"""
Starting with a filename, import and convert data to a list.
Attempted to use csv.Sniffer() but it was inconsistent in delimiter detection.
Instead, limit users to comma- or tab-separated input files.
"""
# accept only comma- or tab-separated files, keyed by extension
filename, file_extension = os.path.splitext(data_filename)
file_extension = file_extension.lower()
ext_delimiter_dict = {".csv":",", ".tsv":"\t"}
with open(data_filename, 'r') as data_file:
csv_reader = csv.reader(data_file, delimiter = ext_delimiter_dict[file_extension])
all_data = list(csv_reader)
return all_data
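# Illustrative note (hypothetical file contents): a parameter .tsv/.csv is expected to have a header row
# followed by one row per transcript with six columns, e.g.
# transcript_id<TAB>target_tfs_filename<TAB>promoter_before_tss<TAB>promoter_after_tss<TAB>top_x_tfs_count<TAB>pval
# ENST00000378357<TAB>tf_ids.txt<TAB>900<TAB>100<TAB>10<TAB>0.01
# The header row is discarded by the caller, which slices the returned list with [1:].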
def compare_tfs_list_jaspar(target_tfs_list, TFBS_matrix_dict):
"""
If user has provided a file containing Jaspar TF ids,
compare candidate entries to those in the loaded dictionary of Jaspar PWMs.
"""
jaspar_dict_keys = TFBS_matrix_dict.keys()
erroneous = list(set(target_tfs_list) - set(jaspar_dict_keys))
target_tfs_list = list(set(jaspar_dict_keys).intersection(target_tfs_list))
if len(erroneous) > 0:
logging.warning(" ".join(["the following tf ids are not in the Jaspar database:", ", ".join(erroneous)]))
return target_tfs_list
def experimentalDataUpdater(exp_data_update):
"""
Update the experimental data by downloading it from the Amazon repository.
Only activates if the user specifically calls for an update, or the data directory does not exist.
"""
experimental_data_dir = os.path.join(script_dir, 'data')
experimental_data_present = False
# test if data dir exists
if not os.path.exists(experimental_data_dir):
directory_creator(experimental_data_dir)
exp_data_update = True
print("Data dir doesn't exist")
# if the data dir exists, check to see that all of the required file patterns are present
else:
required_data_file_patterns = ["pwms.json", "all_tfs_thresholds", "all_pwms_loglikelihood_dict"]
experimental_data_filenames = [x for x in os.listdir(experimental_data_dir) if os.path.isfile(os.path.join(experimental_data_dir, x))]
all_patterns_matched = all([any([required_data_file_pattern in experimental_data_filename
for experimental_data_filename in experimental_data_filenames])
for required_data_file_pattern in required_data_file_patterns])
if all_patterns_matched:
experimental_data_present = True
else:
exp_data_update = True
# perform an update of the base data dir
if exp_data_update:
experimental_data_url = "https://s3.us-east-2.amazonaws.com/tfbssexperimentaldata/data.tar.gz"
experimental_data_down_loc = os.path.join(script_dir,'data.tar.gz')
## current_versions_file = os.path.join(experimental_data_dir, "experimental_data.current_versions.json")
print("Downloading the most current experimental data")
logging.info(" ".join(["Downloading most current experimental data."]))
try:
wget.download(experimental_data_url, out=experimental_data_down_loc)
tar = tarfile.open(experimental_data_down_loc)
tar.extractall(experimental_data_dir)
experimental_data_present = True
except:
## logging.warning(" ".join(["Error in downloading experimental data. Check your internet connection, and make sure the transcript id is of the format 'ENST00000378357' so the correct species data can be downloaded"]))
logging.warning(" ".join(["Error in downloading experimental data. Check your internet connection."]))
experimental_data_present = False
return experimental_data_present
def experimentaldata(target_species):
"""
Retrieve the experimental data for non-human species by downloading it from the Amazon repository.
Activates if the current installation Data directory does not include data for the target species.
Example location: https://s3.us-east-2.amazonaws.com/tfbssexperimentaldata/acanthochromis_polyacanthus.tar.gz
"""
experimental_data_dir = os.path.join(script_dir, 'data')
experimental_data_species_dir = os.path.join(experimental_data_dir, target_species)
experimental_data_species_dir_tar = os.path.join(experimental_data_dir, ".".join([target_species, "tar.gz"]))
if not os.path.exists(experimental_data_species_dir):
aws_server = "https://s3.us-east-2.amazonaws.com"
experimental_data_species_url = "/".join([aws_server, "tfbssexperimentaldata", ".".join([target_species, "tar.gz"])])
print(experimental_data_species_url)
logging.info("Downloading most current experimental data for %s." %target_species)
try:
wget.download(experimental_data_species_url, out=experimental_data_species_dir_tar)
tar = tarfile.open(experimental_data_species_dir_tar)
tar.extractall(experimental_data_dir)
except:
logging.warning(" ".join(["Error in downloading experimental data. Check your internet connection."]))
def experimentalDataUpdater_beta():
"""
Update the experimental data by downloading it from the Amazon repository.
Using a file which contains a dictionary of the most up-to-date experimental data filenames,
activates if the data directory does not exist, if it does not contain the most recent files,
or if it has been >= 60 days since the last update.
This version of the updater is perhaps too error-prone for this stage of development.
"""
## download experimental data if not already present or if it is outdated
current_version_url = "https://s3.us-east-2.amazonaws.com/tfbssexperimentaldata/experimental_data.current_versions.json"
experimental_data_url = "https://s3.us-east-2.amazonaws.com/tfbssexperimentaldata/data.tar.gz"
experimental_data_down_loc = os.path.join(script_dir,'data.tar.gz')
experimental_data_dir = os.path.join(script_dir, 'data')
current_versions_file = os.path.join(experimental_data_dir, "experimental_data.current_versions.json")
update_required = False
if not os.path.exists(experimental_data_dir):
directory_creator(experimental_data_dir)
update_required = True
else:
if os.path.exists(current_versions_file):
# check if all current versions are in the data dir
current_versions = load_json(current_versions_file)
current_versions_filenames = current_versions.values()
owned_versions_filenames = os.listdir(experimental_data_dir)
missing_files = [x for x in current_versions_filenames if x not in owned_versions_filenames]
if len(missing_files) > 0:
update_required = True
# check if 60 days has passed since last check of versions
if 'last_checked' in current_versions:
current_versions_last_checked = current_versions['last_checked']
if (time.time() - current_versions_last_checked)/(3600*24) >= 60:
update_required = True
else:
update_required = True
# download the most current experimental data
if update_required:
print("Downloading the most current experimental data")
logging.info(" ".join(["Downloading most current experimental data."]))
try:
wget.download(current_version_url, out=experimental_data_dir)
wget.download(experimental_data_url, out=experimental_data_down_loc)
tar = tarfile.open(experimental_data_down_loc)
tar.extractall(experimental_data_dir)
except:
logging.warning(" ".join(["Error in downloading experimental data. Check your internet connection."]))
## os.remove(experimental_data_down_loc)
# update the current versions file last checked time with current time
if os.path.exists(current_versions_file):
current_versions = load_json(current_versions_file)
current_versions['last_checked'] = time.time()
dump_json(current_versions_file, current_versions)
def species_specific_data(target_species, chromosome, species_specific_data_dir):
"""
Many datasets are species-specific. If the current target species has species-specific datasets, load them.
Altered to allow for multiple versions of data. The matching files are sorted, and those occurring later in the list are presumed to be later editions.
"""
logging.info(" ".join(["Species-specific data: loading"]))
# load GERP locations
gerp_conservation_locations_dict = {}
species_group = ""
gerp_data_dir = os.path.join(species_specific_data_dir, "gerp_data")
gerp_conservation_locations_dict_filenames = [os.path.join(gerp_data_dir, x) for x in os.listdir(gerp_data_dir) if "gerp_conservation.locations_dict" in x and target_species in x]
if len(gerp_conservation_locations_dict_filenames) > 0:
gerp_conservation_locations_dict_filenames.sort()
gerp_conservation_locations_dict_filename = gerp_conservation_locations_dict_filenames[-1]
gerp_conservation_locations_dict = load_msgpack(gerp_conservation_locations_dict_filename)
species_group = gerp_conservation_locations_dict_filename.split(".")[3]
# load GERP conservation weights
gerp_conservation_weight_dict = {}
gerp_conservation_weight_dict_filenames = [os.path.join(gerp_data_dir, x) for x in os.listdir(gerp_data_dir) if "gerp_conservation.weight_dict" in x and target_species in x]
if len(gerp_conservation_weight_dict_filenames) > 0:
gerp_conservation_weight_dict_filenames.sort()
gerp_conservation_weight_dict_filename = gerp_conservation_weight_dict_filenames[-1]
gerp_conservation_weight_dict = load_msgpack(gerp_conservation_weight_dict_filename)
# load human CAGEs-Genes associated dict
cage_dict = {}
cage_data_dir = os.path.join(species_specific_data_dir, "cage_data")
if os.path.exists(cage_data_dir):
cage_dict_filename = os.path.join(cage_data_dir, ".".join([target_species, "CAGE", "peak_dict", "gene", "hg38", "json"]))
if os.path.exists(cage_dict_filename):
cage_dict = load_json(cage_dict_filename)
# load CAGE locs occurring near promoters of TFs
TF_cage_dict = {}
cage_data_dir = os.path.join(species_specific_data_dir, "cage_data")
if os.path.exists(cage_data_dir):
TF_cage_dict_filename = os.path.join(cage_data_dir, ".".join(["homo_sapiens", "CAGE", "jasparTFs", "dict", "json"]))
if os.path.exists(TF_cage_dict_filename):
TF_cage_dict = load_json(TF_cage_dict_filename)
# load CAGE dist weights
cage_dist_weights_dict = {}
if os.path.exists(cage_data_dir):
cage_dist_weights_dict_filenames = [os.path.join(cage_data_dir, x) for x in os.listdir(cage_data_dir) if "cage_dist_weights" in x and target_species in x]
if len(cage_dist_weights_dict_filenames) > 0:
cage_dist_weights_dict_filenames.sort()
cage_dist_weights_dict_filename = cage_dist_weights_dict_filenames[-1]
cage_dist_weights_dict = load_json(cage_dist_weights_dict_filename)
# load CAGE correlations
cage_correlations_dict = {}
cage_corr_data_dir = os.path.join(species_specific_data_dir, "cage_corr_data")
if os.path.exists(cage_corr_data_dir):
cage_correlations_dict_filename = os.path.join(cage_corr_data_dir, ".".join([target_species, "CAGE_corr", "Chr"+chromosome.upper(), "hg38", "msg"]))
if os.path.exists(cage_correlations_dict_filename):
cage_correlations_dict = load_msgpack(cage_correlations_dict_filename)
else:
print("cage_correlations_dict not loaded")
## cage_correlations_dict_filenames = [os.path.join(cage_corr_data_dir, x) for x in os.listdir(cage_corr_data_dir) if "rekeyed_combined_cage_corr_dict" in x and target_species in x]
## if len(cage_correlations_dict_filenames) > 0:
## cage_correlations_dict_filenames.sort()
## cage_correlations_dict_filename = cage_correlations_dict_filenames[-1]
## cage_correlations_dict = load_msgpack(cage_correlations_dict_filename)
# load CAGE correlation weights
cage_corr_weights_dict = {}
if os.path.exists(cage_corr_data_dir):
cage_corr_weights_dict_filename = os.path.join(cage_corr_data_dir, ".".join([target_species, "CAGE_corr", "weight_dict", "hg38", "json"]))
if os.path.exists(cage_corr_weights_dict_filename):
cage_corr_weights_dict = load_json(cage_corr_weights_dict_filename)
cage_corr_weights_dict = {float(k):v for k,v in cage_corr_weights_dict.iteritems()}
else:
print("cage_corr_weights_dict not loaded")
## cage_corr_weights_dict_filenames = [os.path.join(cage_data_dir, x) for x in os.listdir(cage_data_dir) if "cage_corr_weights" in x and target_species in x]
## if len(cage_corr_weights_dict_filenames) > 0:
## cage_corr_weights_dict_filenames.sort()
## cage_corr_weights_dict_filename = cage_corr_weights_dict_filenames[-1]
## cage_corr_weights_dict = load_json(cage_corr_weights_dict_filename)
## cage_corr_weights_dict = {float(k):v for k,v in cage_corr_weights_dict.iteritems()}
## # load CAGE keys
## cage_keys_dict = {}
## if os.path.exists(cage_data_dir):
## cage_keys_dict_filenames = [os.path.join(cage_data_dir, x) for x in os.listdir(cage_data_dir) if "cage_ids_key_dict" in x and target_species in x]
## if len(cage_keys_dict_filenames) > 0:
## cage_keys_dict_filenames.sort()
## cage_keys_dict_filename = cage_keys_dict_filenames[-1]
## cage_keys_dict = load_json(cage_keys_dict_filename)
## # load JASPAR tfs to Ensembl transcript ids
## jasparTFs_transcripts_dict_filenames = [os.path.join(species_specific_data_dir, x) for x in os.listdir(species_specific_data_dir) if "jasparTFs.transcripts.single_protein" in x and target_species in x]
## if len(jasparTFs_transcripts_dict_filenames) > 0:
## jasparTFs_transcripts_dict_filenames.sort()
## jasparTFs_transcripts_dict_filename = jasparTFs_transcripts_dict_filenames[-1]
## jasparTFs_transcripts_dict = load_json(jasparTFs_transcripts_dict_filename)
## else:
## jasparTFs_transcripts_dict = {}
# load CpG score weights
cpg_obsexp_weights_dict_filenames = [os.path.join(species_specific_data_dir, x) for x in os.listdir(species_specific_data_dir) if "cpg_obsexp_weights" in x and target_species in x]
if len(cpg_obsexp_weights_dict_filenames) > 0:
cpg_obsexp_weights_dict_filenames.sort()
cpg_obsexp_weights_dict_filename = cpg_obsexp_weights_dict_filenames[-1]
cpg_obsexp_weights_dict = load_json(cpg_obsexp_weights_dict_filename)
cpg_obsexp_weights_dict = {float(k):float(v) for k,v in cpg_obsexp_weights_dict.iteritems()}
cpg_obsexp_weights_dict_keys = cpg_obsexp_weights_dict.keys()
cpg_obsexp_weights_dict_keys.sort()
else:
cpg_obsexp_weights_dict = {}
cpg_obsexp_weights_dict_keys = []
# load GTEx variants
gtex_variants = {}
gtex_data_dir = os.path.join(species_specific_data_dir, "gtex_data")
if os.path.exists(gtex_data_dir):
gtex_chrom_dict_filename = os.path.join(gtex_data_dir, ".".join([target_species, "gtex_v7", "Chr"+chromosome.upper(), "min_unique", "eqtls", "grch38","msg"]))
if os.path.exists(gtex_chrom_dict_filename):
gtex_variants = load_msgpack(gtex_chrom_dict_filename)
# load GTEx weights
gtex_weights_dict = {}
if os.path.exists(gtex_data_dir):
gtex_weights_dict_filenames = [os.path.join(gtex_data_dir, x) for x in os.listdir(gtex_data_dir) if "gtex_weights" in x and target_species in x]
if len(gtex_weights_dict_filenames) > 0:
gtex_weights_dict_filenames.sort()
gtex_weights_dict_filename = gtex_weights_dict_filenames[-1]
gtex_weights_dict = load_json(gtex_weights_dict_filename)
gtex_weights_dict = {float(k):float(v) for k,v in gtex_weights_dict.iteritems()}
# load meta clusters from GTRD project
gtrd_metaclusters_dict = {}
gtrd_data_dir = os.path.join(species_specific_data_dir, "gtrd_data")
if os.path.exists(gtrd_data_dir):
gtrd_metaclusters_chrom_dict_filename = os.path.join(gtrd_data_dir, ".".join([target_species, "metaclusters", "interval", "Chr"+chromosome.upper(), "clipped", "ordered", "tupled", "msg"]))
if os.path.exists(gtrd_metaclusters_chrom_dict_filename):
gtrd_metaclusters_dict = load_msgpack(gtrd_metaclusters_chrom_dict_filename)
# load metacluster overlap weights
metacluster_overlap_weights_dict = {}
if os.path.exists(gtrd_data_dir):
metacluster_overlap_weights_dict_filenames = [os.path.join(gtrd_data_dir, x) for x in os.listdir(gtrd_data_dir) if "metaclusters_overlap_weights_dict" in x and target_species in x]
if len(metacluster_overlap_weights_dict_filenames) > 0:
metacluster_overlap_weights_dict_filenames.sort()
metacluster_overlap_weights_dict_filename = metacluster_overlap_weights_dict_filenames[-1]
metacluster_overlap_weights_dict = load_json(metacluster_overlap_weights_dict_filename)
metacluster_overlap_weights_dict = {float(k):float(v) for k,v in metacluster_overlap_weights_dict.iteritems()}
# load ATAC-Seq from Encode project
atac_seq_dict = {}
atac_seq_data_dir = os.path.join(species_specific_data_dir, "atac_data")
if os.path.exists(atac_seq_data_dir):
atac_seq_chrom_dict_filename = os.path.join(atac_seq_data_dir, ".".join([target_species, "atac-seq", "Chr"+chromosome.upper(), "msg"]))
if os.path.exists(atac_seq_chrom_dict_filename):
atac_seq_dict = load_msgpack(atac_seq_chrom_dict_filename)
# load ATAC-Seq dist weights
atac_dist_weights_dict = {}
if os.path.exists(atac_seq_data_dir):
atac_dist_weights_dict_filenames = [os.path.join(atac_seq_data_dir, x) for x in os.listdir(atac_seq_data_dir) if "atac_dist_weights" in x and target_species in x]
if len(atac_dist_weights_dict_filenames) > 0:
atac_dist_weights_dict_filenames.sort()
atac_dist_weights_dict_filename = atac_dist_weights_dict_filenames[-1]
atac_dist_weights_dict = load_json(atac_dist_weights_dict_filename)
# load pre-calculated combined affinity score, by tf, p-values
cas_pvalues_dict = {}
cas_pvalues_dict_filename = os.path.join(species_specific_data_dir, ".".join(["CAS_pvalues", "0.1", "tf_ls", "json"]))
if os.path.exists(cas_pvalues_dict_filename):
cas_pvalues_dict = load_json(cas_pvalues_dict_filename)
return gerp_conservation_locations_dict, gerp_conservation_weight_dict, species_group, cage_dict, TF_cage_dict, cage_dist_weights_dict, cage_correlations_dict, cage_corr_weights_dict, atac_dist_weights_dict, metacluster_overlap_weights_dict, cpg_obsexp_weights_dict, cpg_obsexp_weights_dict_keys, gtex_variants, gtex_weights_dict, gtrd_metaclusters_dict, atac_seq_dict, cas_pvalues_dict
def overlap_range(x,y):
"""
Return the range of integer positions shared by two intervals, each given as a list of endpoint positions; an empty range means no overlap.
"""
x.sort()
y.sort()
return range(max(x[0], y[0]), min(x[-1], y[-1])+1)
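# Example (hypothetical coordinates): overlap_range([10, 20], [15, 30]) returns range(15, 21),
# i.e. the shared positions 15..20 inclusive; non-overlapping intervals yield an empty range.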
################################################################################
# PWM analysis #################################################################
################################################################################
def pwm_maker(strand, motif_length, tf_motif, bg_nuc_freq_dict, neg_bg_nuc_freq_dict):
"""
Make a PWM from a nucleotide frequency table.
"""
pwm = [[],[],[],[]]
nuc_list = 'ACGT'
# PWM according to http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2647310/
for i in range(0, motif_length):
col = [tf_motif[0][i], tf_motif[1][i], tf_motif[2][i], tf_motif[3][i]]
# number of sequences
N = sum(col)
# for each nucleotide (row) at this position in the PFM.
for j in range(0, len(tf_motif)):
# nuc count at this position.
nuc_count = tf_motif[j][i]
# pseudo-count to avoid zero probabilities; a fixed value of 0.8 is used here rather than sqrt(N).
pseudo_count = 0.8
# background frequency for this nucleotide in the promoter.
if strand == "+1":
nuc_bg = bg_nuc_freq_dict[nuc_list[j]]
if strand == "-1":
nuc_bg = neg_bg_nuc_freq_dict[nuc_list[j]]
# probability of nuc
nuc_probability = (nuc_count + pseudo_count/4)/(N + pseudo_count)
nuc_weight = math.log((nuc_probability/nuc_bg), 2)
pwm[j].append(nuc_weight)
## ppm[j].append(nuc_probability)
pwm = pwm[:]
return pwm
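# Worked example (hypothetical counts and background): for a PFM column with counts A=7, C=1, G=1, T=1 (N=10),
# pseudo_count=0.8 and an assumed background frequency of 0.25 for A, the weight for A is
# log2(((7 + 0.8/4) / (10 + 0.8)) / 0.25) = log2(0.667 / 0.25) ~= 1.42 bits.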
def PWM_scorer(seq, pwm, pwm_dict, pwm_type):
"""
Generate score for current seq given a pwm.
"""
# set relevant variables based on whether the pwm is mono or dinucleotide
## if pwm_type == "mono":
## motif_dist = len(seq)
## span = 1
## if pwm_type == "dinuc":
## motif_dist = len(seq) - 1
## span = 2
seq_score = 0.0
for i in range(0, len(seq)):
## seq_score += pwm[pwm_dict[seq[i:i+1]][i]]
seq_score += pwm[pwm_dict[seq[i:i+1]]][i]
## nuc = seq[i:i+1]
## row = pwm_dict[nuc]
## score = pwm[row][i]
## seq_score += score
## # account for sequences which are non-standard code
## non_standard_dict = {'R':['A','G'],
## 'Y':['C','T'],
## 'S':['G','C'],
## 'W':['A','T'],
## 'K':['G','T'],
## 'M':['A','C'],
## 'B':['C','G','T'],
## 'D':['A','G','T'],
## 'H':['A','C','T'],
## 'V':['A','C','G'],
## 'B':['C','G','T']}
##
## possible_seqs = []
## non_standards_in_seq = [x for x in non_standard_dict.keys() if x in seq]
## if len(non_standards_in_seq) > 0:
## for non_standard_in_seq in non_standards_in_seq:
## variant_chars = non_standard_dict[non_standard_in_seq]
## for variant_char in variant_chars:
## variant_seq = seq.replace(non_standard_in_seq, variant_char)
## possible_seqs.append(variant_seq)
## else:
## possible_seqs.append(seq)
##
## # iterate through candidate sequence, and score each mono or dinucleotide
## possible_seq_scores = []
## for possible_seq in possible_seqs:
## seq_score = 0.0
## for i in range(0, motif_dist):
## nuc = possible_seq[i:i+span]
## row = pwm_dict[nuc]
## score = pwm[row][i]
## seq_score += score
## possible_seq_scores.append(seq_score)
##
## # use the best score achieved
## possible_seq_scores.sort()
## seq_score = possible_seq_scores[-1]
return seq_score
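# Usage sketch (hypothetical values): for seq = "ACG" and the mononucleotide pwm_dict {"A":0,"C":1,"G":2,"T":3},
# the returned score is pwm[0][0] + pwm[1][1] + pwm[2][2], i.e. the sum of the per-position weights of the observed bases.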
def tfbs_finder(transcript_name, alignment, target_tfs_list, TFBS_matrix_dict, target_dir, pwm_score_threshold_dict, all_pwms_loglikelihood_dict, unaligned2aligned_index_dict, promoter_after_tss, pval):
"""
1. Convert the PFM to a PWM for each TF in the JASPAR dictionary.
2. Score all positions in the cleaned sequence.
3. If a score is greater than or equal to the precomputed threshold, keep it; otherwise discard it.
4. Return a dictionary of PWM scores keyed as [species][tf_name][strand].
"""
start_time = time.time()
tfbss_found_dict_outfilename = os.path.join(target_dir, "TFBSs_found.all.json")
# Determine if the analysis has been done already, load results if so
if os.path.isfile(tfbss_found_dict_outfilename):
logging.info(" ".join(["tfbss_found_dict already exists: loading"]))
tfbss_found_dict = load_json(tfbss_found_dict_outfilename)
# If results don't already exist, time to party
else:
tfbss_found_dict = {}
align_chars = '-N .'
mononuc_pwm_dict = {"A":0,"C":1,"G":2,"T":3}
entry = alignment[0]
species = entry['species']
# Remove alignment/ambiguous characters from the sequences
cleaned_seq = entry['seq']
for char in align_chars:
cleaned_seq = cleaned_seq.replace(char,"")
entry_seqrecord = SeqRecord(Seq(cleaned_seq, alphabet=IUPAC.unambiguous_dna), id=species)
forward_seq = str(entry_seqrecord.seq)
reverse_seq = str(entry_seqrecord.seq.reverse_complement())
seq_dict = {"+1": forward_seq, "-1":reverse_seq}
# generate background frequencies of each mono-nucleotide for forward and reverse strands
bg_nuc_freq_dict = {}
neg_bg_nuc_freq_dict = {}
# Use empirical data from whole human genome, but limited to within -2000/+200 bp of TSSs
# http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0033204
# http://journals.plos.org/plosone/article/file?type=supplementary&id=info:doi/10.1371/journal.pone.0033204.s023
## bg_nuc_freq_dict = {'A':0.247, 'C':0.251, 'G':0.254, 'T':0.248}
## neg_bg_nuc_freq_dict = {'A':0.247, 'C':0.251, 'G':0.254, 'T':0.248}
# https://arxiv.org/pdf/q-bio/0611041.pdf
# empirical data from complete genome
bg_nuc_freq_dict = {'A':0.292, 'C':0.207, 'G':0.207, 'T':0.292}
neg_bg_nuc_freq_dict = {'A':0.292, 'C':0.207, 'G':0.207, 'T':0.292}
# iterate through each tf_name and its motif
for tf_name in target_tfs_list:
if tf_name in TFBS_matrix_dict:
tf_motif = TFBS_matrix_dict[tf_name]
motif_length = len(tf_motif[0])
if motif_length > 0:
# retrieve precomputed threshold and other information required for calculating the pvalue of the score
tf_pwm_score_threshold_dict = pwm_score_threshold_dict[tf_name]
pvals_scores_list = [[k,v] for k,v in tf_pwm_score_threshold_dict.iteritems()]
pvals_scores_list_sorted = sorted(pvals_scores_list, key=itemgetter(1))
scores_list_sorted = [x[1] for x in pvals_scores_list_sorted]
pvals_list = [x[0] for x in pvals_scores_list_sorted]
pvals_list.sort()
if pval in tf_pwm_score_threshold_dict:
tf_pwm_score_threshold = tf_pwm_score_threshold_dict[pval]
else:
if pval == 1:
tf_pwm_score_threshold = -100000
else:
pval = pvals_list[bisect_left(pvals_list, pval)]
tf_pwm_score_threshold = tf_pwm_score_threshold_dict[pval]
tfbss_found_dict[tf_name] = []
# iterate through the forward and reverse strand sequences
for strand, seq in seq_dict.iteritems():
pwm = pwm_maker(strand, motif_length, tf_motif, bg_nuc_freq_dict, neg_bg_nuc_freq_dict)
seq_length = len(seq)
# iterate through the nt sequence, extract a current frame based on the motif size and score
for i in range(0, seq_length - motif_length):
current_frame = seq[i:i+motif_length]
current_frame_score = PWM_scorer(current_frame, pwm, mononuc_pwm_dict, 'mono')
# keep results that are above the precomputed threshold
## if current_frame_score >= tf_pwm_score_threshold:
if current_frame_score >= tf_pwm_score_threshold:
current_frame_score = round(current_frame_score, 2)
pval_index = bisect_left(scores_list_sorted, current_frame_score)
if pval_index >= len(pvals_scores_list_sorted):
pval_index = -1
## elif pval_index == 0:
## pval_index == 0
else:
pval_index -= 1
# account for pvalues which are larger than the current largest, so that they can be distinguished appropriately in the result table.
if pval_index < 0:
current_frame_score_pvalue = ">" + str(pvals_scores_list_sorted[0][0])
else:
current_frame_score_pvalue = str(pvals_scores_list_sorted[pval_index][0])
hit_loc_start, hit_loc_end, hit_loc_before_TSS_start, hit_loc_before_TSS_end = start_end_found_motif(i, strand, seq_length, promoter_after_tss, motif_length)
# identify position in alignment from start of found motif in unaligned sequence
aligned_position = unaligned2aligned_index_dict[species][hit_loc_start]
tfbss_found_dict[tf_name].append([current_frame, strand, hit_loc_start, hit_loc_end, hit_loc_before_TSS_start, hit_loc_before_TSS_end, current_frame_score, current_frame_score_pvalue])
# add to results dictionary by tf_name
## if tf_name in tfbss_found_dict:
## tfbss_found_dict[tf_name].append([tf_name, species, current_frame, strand, hit_loc_start, hit_loc_end, hit_loc_before_TSS_start, hit_loc_before_TSS_end, current_frame_score, current_frame_score_pvalue, aligned_position])
## tfbss_found_dict[tf_name].append([tf_name, current_frame, strand, hit_loc_start, hit_loc_end, hit_loc_before_TSS_start, hit_loc_before_TSS_end, current_frame_score, current_frame_score_pvalue])
##
## else:
## tfbss_found_dict[tf_name] = [[tf_name, species, current_frame, strand, hit_loc_start, hit_loc_end, hit_loc_before_TSS_start, hit_loc_before_TSS_end, current_frame_score, current_frame_score_pvalue, aligned_position]]
## tfbss_found_dict[tf_name] = [[tf_name, current_frame, strand, hit_loc_start, hit_loc_end, hit_loc_before_TSS_start, hit_loc_before_TSS_end, current_frame_score, current_frame_score_pvalue]]
# sort results for each tf_name according to the position in alignment
## for tf_name, hits in tfbss_found_dict.iteritems():
# ref-point
## tfbss_found_dict[tf_name] = sorted(hits, key = itemgetter(10))
## tfbss_found_dict[tf_name] = sorted(hits, key = itemgetter(3))
## # save the results to file
## dump_json(tfbss_found_dict_outfilename, tfbss_found_dict)
end_time = time.time()
logging.info(" ".join(["total time for tfbs_finder() for this transcript:", str(end_time - start_time), "seconds"]))
return tfbss_found_dict
def start_end_found_motif(i, strand, seq_length, promoter_after_tss, motif_length):
"""
Determine the start/end positions of the found motif.
"""
if strand == "+1":
hit_loc_start = i
hit_loc_before_TSS_start = i - seq_length + promoter_after_tss
hit_loc_end = i + motif_length
hit_loc_before_TSS_end = i - seq_length + motif_length + promoter_after_tss
if strand == "-1":
hit_loc_start = seq_length - i - motif_length
hit_loc_before_TSS_start = (seq_length - i - motif_length) - seq_length + promoter_after_tss
hit_loc_end = seq_length - i
hit_loc_before_TSS_end = (seq_length - i) - seq_length + promoter_after_tss
return hit_loc_start, hit_loc_end, hit_loc_before_TSS_start, hit_loc_before_TSS_end
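# Worked example (hypothetical values): with seq_length=1000, promoter_after_tss=100 and motif_length=10,
# a hit at i=0 on the "+1" strand gives hit_loc_start=0, hit_loc_end=10,
# hit_loc_before_TSS_start=-900 and hit_loc_before_TSS_end=-890,
# i.e. a motif beginning 900 nt upstream of the TSS.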
def unaligned2aligned_indexes(cleaned_aligned_filename):
"""
Create a dictionary for mapping aligned positions to unaligned positions.
"""
with open(cleaned_aligned_filename, 'rU') as cleaned_aligned_file:
aligned_entries_dict = SeqIO.to_dict(SeqIO.parse(cleaned_aligned_file, 'fasta'))
unaligned2aligned_index_dict = {}
for species, seqRecord in aligned_entries_dict.iteritems():
unaligned2aligned_index_dict[species] = {}
seq_str = str(seqRecord.seq)
for aligned_index in range(len(seq_str)):
if seq_str[aligned_index] != "-":
unaligned_index = aligned_index - seq_str[:aligned_index].count("-")
unaligned2aligned_index_dict[species][unaligned_index] = aligned_index
return unaligned2aligned_index_dict
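# Example (hypothetical alignment): for an aligned sequence "A-CG", the mapping for that species is
# {0: 0, 1: 2, 2: 3}, i.e. unaligned position 1 ('C') sits at aligned column 2 because of the gap.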
def calcCombinedAffinityPvalue(combined_affinity_score, cas_pvalues_dict, cass_with_pvalues_sorted, cass_sorted, cas_pvalues_subdict):
"""
Calculate the pvalue for this combined affinity score.
"""
# determine the pvalue of the current combined affinity score
if combined_affinity_score in cas_pvalues_subdict:
combined_affinity_score_pvalue = str(cas_pvalues_subdict[combined_affinity_score])
else:
# index the current combined affinity score in the sorted list of scores (keys)
cass_with_pvalues_sorted_index = bisect_left(cass_sorted, combined_affinity_score)
if cass_with_pvalues_sorted_index>0:
combined_affinity_score_pvalue = str(cass_with_pvalues_sorted[cass_with_pvalues_sorted_index][1])
else:
combined_affinity_score_pvalue = ">"+str(cass_with_pvalues_sorted[0][1])
return combined_affinity_score_pvalue
def find_clusters(gene_name, ens_gene_id, chr_start, chr_end, alignment, target_species, chromosome, tfbss_found_dict, cleaned_aligned_filename, converted_gerps_in_promoter, gerp_conservation_weight_dict, converted_cages, converted_metaclusters_in_promoter, converted_atac_seqs_in_promoter, converted_eqtls, gtex_weights_dict, transcript_id, cage_dict, TF_cage_dict, cage_dist_weights_dict, atac_dist_weights_dict, metacluster_overlap_weights_dict, cpg_list, cpg_obsexp_weights_dict, cpg_obsexp_weights_dict_keys, cage_correlations_dict, cage_corr_weights_dict, gtex_variants, gene_len, cas_pvalues_dict):
"""
For each target species hit:
Identify the highest score for each species within the locality threshold.
Create combined affinity score from the target species hit and those best scores from each species.
If two target species hits are within the locality threshold from one another, choose the hit which has the highest combined affinity score.
"""
start_time = time.time()
cluster_dict = {}
for tf_name, hits in tfbss_found_dict.iteritems():
# build dict and sorted list of pre-computed combined affinity scores for this tf
if tf_name in cas_pvalues_dict:
cass_with_pvalues_sorted = cas_pvalues_dict[tf_name]
cass_sorted = [x[0] for x in cass_with_pvalues_sorted]
cas_pvalues_subdict = {x[0]:x[1] for x in cass_with_pvalues_sorted}
if len(hits) > 0:
cluster_dict[tf_name] = []
tf_len = len(hits[0][0])  # length of the motif sequence (hit[0]); hit[1] is the strand
target_cages, tf_cages = cage_correlations_summing_preparation(gene_name, transcript_id, cage_dict, TF_cage_dict, tf_name)
eqtl_occurrence_log_likelihood = eqtl_overlap_likelihood(converted_eqtls, chr_start, chr_end, tf_len, gene_len, gtex_variants, ens_gene_id)
for hit in hits:
# ref-point
combined_affinity_score = 0
target_species_hit = hit
target_species_pwm_score = target_species_hit[6]
species_weights_sum = 0
cage_weights_sum = 0
eqtls_weights_sum = 0
atac_weights_sum = 0
metacluster_weights_sum = 0
corr_weight_sum = 0
tf_len = len(hit[0])
# datasets only available for homo sapiens
# todo: move these species-specific checks inside the respective weighting functions
if target_species == "homo_sapiens":
cage_weights_sum = cage_weights_summing(transcript_id, target_species_hit, cage_dist_weights_dict, converted_cages)
eqtls_weights_sum = eqtls_weights_summing(eqtl_occurrence_log_likelihood, ens_gene_id, target_species_hit, converted_eqtls, gtex_weights_dict, chr_start, chr_end, gtex_variants, tf_len, gene_len)
atac_weights_sum = atac_weights_summing(transcript_id, target_species_hit, atac_dist_weights_dict, converted_atac_seqs_in_promoter)
metacluster_weights_sum = metacluster_weights_summing(transcript_id, target_species_hit, metacluster_overlap_weights_dict, converted_metaclusters_in_promoter)
corr_weight_sum = cage_correlations_summing(target_species_hit, transcript_id, target_cages, tf_cages, cage_correlations_dict, cage_corr_weights_dict)
species_weights_sum = gerp_weights_summing(target_species, transcript_id, chromosome, target_species_hit, gerp_conservation_weight_dict, converted_gerps_in_promoter)
cpg_weight = cpg_weights_summing(transcript_id, target_species_hit, cpg_obsexp_weights_dict, cpg_obsexp_weights_dict_keys, cpg_list)
# calculate the complete score (combined affinity)
experimental_weights = [species_weights_sum, cage_weights_sum, eqtls_weights_sum, atac_weights_sum, metacluster_weights_sum, cpg_weight, corr_weight_sum]
combined_affinity_score += sum(experimental_weights) + target_species_pwm_score
combined_affinity_score = round(combined_affinity_score, 2)
if tf_name in cas_pvalues_dict:
combined_affinity_score_pvalue = calcCombinedAffinityPvalue(combined_affinity_score, cas_pvalues_dict, cass_with_pvalues_sorted, cass_sorted, cas_pvalues_subdict)
else:
combined_affinity_score_pvalue = ""
# append the combined affinity score and its pvalue
hit.append(combined_affinity_score)
hit.append(combined_affinity_score_pvalue)
# round all of the experimental weights to two places and append to hit
experimental_weights_rounded = [round(x, 2) for x in experimental_weights]
hit += experimental_weights_rounded
cluster_dict[tf_name].append(hit)
total_time = time.time() - start_time
logging.info(" ".join(["total time for find_clusters() for this transcript:", str(total_time), "seconds"]))
return cluster_dict
def eqtl_overlap_likelihood(converted_eqtls, chr_start, chr_end, tf_len, gene_len, gtex_variants, ens_gene_id):
"""
Likelihood of eQTL occurrence.
"""
eqtl_occurrence_log_likelihood = 0
if ens_gene_id in gtex_variants:
eqtl_occurrence_log_likelihood_dict = {}
## if len(converted_eqtls) > 0:
transcript_len = float(chr_end - chr_start)
# determine size of search space, and probability of observing an eQTL in this gene.
# GTEx searches for variants which occur over the span of the gene + 1,000,000 nt upstream+downstream.
eqtl_search_space = 2000000 + gene_len
associated_gtx_eqtls = gtex_variants[ens_gene_id]
variant_count = len(associated_gtx_eqtls) * 1.
eqtl_occurrence_log_likelihood = -1 * math.log(((tf_len * variant_count)/(eqtl_search_space-tf_len)) * (transcript_len/gene_len), 2)
return eqtl_occurrence_log_likelihood
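# Worked example (hypothetical values): with tf_len=10, 100 associated GTEx variants, gene_len=50000,
# a transcript span of 40000 nt and eqtl_search_space = 2000000 + 50000,
# the term inside the log is (10*100)/(2050000-10) * (40000/50000) ~= 3.9e-4,
# giving eqtl_occurrence_log_likelihood = -log2(3.9e-4) ~= 11.3.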
def eqtls_weights_summing(eqtl_occurrence_log_likelihood, ens_gene_id, target_species_hit, converted_eqtls, gtex_weights_dict, chr_start, chr_end, gtex_variants, tf_len, gene_len):
"""
Identify if any of the eQTLs associated with this gene overlap this predicted TFBS.
Retrieve the log-likelihood scores for all of them.
To do: refactor so that the occurrence likelihood is computed only once per gene (see eqtl_overlap_likelihood).
"""
eqtl_weights = []
if len(converted_eqtls) > 0:
## transcript_len = float(chr_end - chr_start)
# Only needs to be calculated once, therefore break into new function (eqtl_overlap_likelihood) so that it is not performed for each of the TFBS predictions #
# Requires the tf_len, which will need to be stored in a dict instead of drawing it from the current hit in order to avoid repetition #
# determine size of search space, and probability of observing an eQTL in this gene.
# GTEx searches for variants which occur over the span of the gene + 1,000,000 nt upstream+downstream.
## eqtl_search_space = 2000000 + gene_len
## associated_gtx_eqtls = gtex_variants[ens_gene_id]
## variant_count = len(associated_gtx_eqtls) * 1.
## eqtl_occurrence_log_likelihood = -1 * math.log(((tf_len * variant_count)/(eqtl_search_space-tf_len)) * (transcript_len/gene_len), 2)
# determine the weight score for likelihood of this magnitude eQTL.
# ref-point
motif_start = target_species_hit[4]
motif_end = target_species_hit[5]
for converted_eqtl in converted_eqtls:
converted_eqtl_start = converted_eqtl[0]
converted_eqtl_end = converted_eqtl[1]
converted_eqtl_score_mag = abs(converted_eqtl[2])
overlap = overlap_range([motif_start, motif_end], [converted_eqtl_start, converted_eqtl_end])
## if motif_start<=converted_eqtl_start<=motif_end or motif_start<=converted_eqtl_end<=motif_end:
if len(overlap) > 0:
eqtl_weight = gtex_weights_dict[converted_eqtl_score_mag]
eqtl_weights.append(eqtl_weight + eqtl_occurrence_log_likelihood)
eqtl_weights_sum = sum(eqtl_weights)
return eqtl_weights_sum
##def cage_correlations_summing_preparation(transcript_id, cage_dict, TF_cage_dict, tf_name, jasparTFs_transcripts_dict):
def cage_correlations_summing_preparation(gene_name, transcript_id, cage_dict, TF_cage_dict, tf_name):
"""
Extract transcript-relevant CAGEs (target_cages) once for each TF under analysis. This could be reduced
to just once for the whole analysis of the current transcript (better than the last version, where it was once per hit).
Extract TF-relevant data just once per TF, instead of for every hit.
Identify the CAGE ids which are associated with the promoter of the TF currently under analysis.
The correlation between these and the target gene will be extracted and the correlating log-weight will be summed.
"""
target_cages = []
tf_cages = []
# current transcript (target) cages
if gene_name in cage_dict:
target_cages = [x[0].replace("hg_","").replace(".1","") for x in cage_dict[gene_name]]
if tf_name in TF_cage_dict:
tf_cages = [x[0].replace("hg_","").replace(".1","") for x in TF_cage_dict[tf_name]]
## # JASPAR tfs are often hetero multimers
## # therefore we should parse the individual proteins and identify transcripts for each
## split_tf_names = clean_jaspar_names([tf_name])
## tf_transcripts = []
## for split_tf_name in split_tf_names:
## if split_tf_name in jasparTFs_transcripts_dict:
## tf_transcripts += jasparTFs_transcripts_dict[split_tf_name]
##
## # for each JASPAR transcript, compile associated FANTOM CAGEs
## for tf_transcript in tf_transcripts:
## tf_cages += cage_dict[tf_transcript]
##
## # CAGEs may be shared by multiple transcripts with TSSs in close proximity
## tf_cages = list(set([x[0] for x in tf_cages]))
return target_cages, tf_cages
##def cage_correlations_summing(target_species_hit, transcript_id, target_cages, tf_cages, jasparTFs_transcripts_dict, cage_keys_dict, cage_correlations_dict, cage_corr_weights_dict, cage_correlations_hit_tf_dict):
##def cage_correlations_summing(target_species_hit, transcript_id, target_cages, tf_cages, jasparTFs_transcripts_dict, cage_keys_dict, cage_correlations_dict, cage_corr_weights_dict):
##def cage_correlations_summing(target_species_hit, transcript_id, target_cages, tf_cages, jasparTFs_transcripts_dict, cage_correlations_dict, cage_corr_weights_dict):
def cage_correlations_summing(target_species_hit, transcript_id, target_cages, tf_cages, cage_correlations_dict, cage_corr_weights_dict):
"""
Extract correlation values between CAGEs associated with a predicted TFBS protein,
and CAGEs associated with the current gene.
"""
corr_weights_ls = []
corr_weight_sum = 0
# cages for all transcripts of the predicted TFBS's proteins
tf_name = target_species_hit[0]
for target_cage in target_cages:
if target_cage in cage_correlations_dict:
for tf_cage in tf_cages:
if tf_cage in cage_correlations_dict[target_cage]:
cage_correlation = cage_correlations_dict[target_cage][tf_cage]
cage_corr_weight = cage_corr_weights_dict[abs(cage_correlation)]
## corr_weight_sum += cage_corr_weight
corr_weights_ls.append(cage_corr_weight)
## if target_cage_key in cage_correlations_dict[tf_cage_key]:
## cage_correlation = cage_correlations_dict[tf_cage_key][target_cage_key]
## cage_corr_weight = cage_corr_weights_dict[abs(cage_correlation)]
## corr_weights_ls.append(cage_corr_weight)
if len(corr_weights_ls) > 0:
corr_weights_ls.sort()
corr_weight_sum = corr_weights_ls[-1]
## cage_correlations_hit_tf_dict[tf_name] = corr_weight_sum
## return corr_weight_sum, cage_correlations_hit_tf_dict
return corr_weight_sum
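# Note: only the single largest correlation weight is used (corr_weights_ls is sorted and its last
# element is returned), rather than the sum of all target-CAGE/TF-CAGE correlation weights.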
def cage_weights_summing(transcript_id, target_species_hit, cage_dist_weights_dict, converted_cages):
"""
Identify CAGE peaks near this predicted TFBS and retrieve a log-likelihood score for each distance from the pre-existing dictionary.
"""
cage_weights = []
# ref-point
for converted_cage in converted_cages:
cage_peak_count_ratio = converted_cage[3]
motif_cage_dist = str(distance_solve([converted_cage[0], converted_cage[1]], [target_species_hit[4], target_species_hit[5]]))
if motif_cage_dist in cage_dist_weights_dict:
cage_weight = cage_dist_weights_dict[motif_cage_dist]
cage_weight_peak_count_ratio_adjusted = cage_weight * cage_peak_count_ratio
cage_weights.append(cage_weight_peak_count_ratio_adjusted)
cage_weights_sum = sum(cage_weights)
return cage_weights_sum
def atac_weights_summing(transcript_id, target_species_hit, atac_dist_weights_dict, converted_atac_seqs_in_promoter):
"""
Identify ATAC-Seq peaks which are near a putative TFBS.
Retrieve a log-likelihood score for this from the pre-existing dictionary.
"""
atac_weights = []
motif_start = target_species_hit[4]
motif_end = target_species_hit[5]
for converted_atac in converted_atac_seqs_in_promoter:
transcript_atac_start = converted_atac[0]
transcript_atac_end = converted_atac[1]
motif_atac_dist = str(distance_solve([motif_start, motif_end],[transcript_atac_start, transcript_atac_end]))
if motif_atac_dist in atac_dist_weights_dict:
atac_weight = atac_dist_weights_dict[motif_atac_dist]
atac_weights.append(atac_weight)
atac_weights_sum = sum(atac_weights)
return atac_weights_sum
def metacluster_weights_summing(transcript_id, target_species_hit, metacluster_overlap_weights_dict, converted_metaclusters_in_promoter):
"""
Identify the number of metaclusters which overlap this putative TFBS.
Retrieve a log-likelihood score for this from the pre-existing dictionary.
"""
num_overlapping_metaclusters = 0
metacluster_weights_sum = 0
# ref-point
motif_start = target_species_hit[4]
motif_end = target_species_hit[5]
for converted_metacluster in converted_metaclusters_in_promoter:
transcript_metacluster_start = converted_metacluster[0]
transcript_metacluster_end = converted_metacluster[1]
# a containment test doesn't work because metaclusters are bigger than motifs, so test for any overlap instead
overlap = overlap_range([motif_start, motif_end], [transcript_metacluster_start, transcript_metacluster_end])
if len(overlap)>0:
num_overlapping_metaclusters += 1
if num_overlapping_metaclusters in metacluster_overlap_weights_dict:
metacluster_weights_sum = metacluster_overlap_weights_dict[num_overlapping_metaclusters]
else:
print("metacluster overlap sum not in weight dict")
logging.warning(" ".join(["metacluster overlap sum not in weight dict"]))
return metacluster_weights_sum
def gerp_weights_summing(target_species, transcript_id, chromosome, target_species_hit, gerp_conservation_weight_dict, converted_gerps_in_promoter):
"""
Identify the GERP constrained elements which are near this predicted TFBS.
Retrieve a log-likelihood score for this distance from the pre-existing dictionary.
"""
# ref-point
motif_start = target_species_hit[4]
motif_end = target_species_hit[5]
tf_len = len(target_species_hit[0])
gerp_weights_sum = 0
dists = []
for converted_gerp_in_promoter in converted_gerps_in_promoter:
converted_gerp_in_promoter_start = converted_gerp_in_promoter[0]
converted_gerp_in_promoter_end = converted_gerp_in_promoter[1]
dist = distance_solve([motif_start, motif_end], [converted_gerp_in_promoter_start, converted_gerp_in_promoter_end])
dists.append(dist)
dists.sort()
best_dist = dists[0]
# should only overlap be used?
## if best_dist == 0:
if best_dist <=1000:
if target_species == "homo_sapiens":
if best_dist in gerp_conservation_weight_dict[chromosome][str(tf_len)]:
gerp_weights_sum = gerp_conservation_weight_dict[chromosome][str(tf_len)][best_dist]
else:
if best_dist in gerp_conservation_weight_dict:
gerp_weights_sum = gerp_conservation_weight_dict[best_dist]
return gerp_weights_sum
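# Note: only the nearest GERP element is considered (best_dist), and it contributes a weight only when it
# lies within 1000 nt of the predicted TFBS; for homo_sapiens the weight is additionally keyed by
# chromosome and motif length.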
def cpg_weights_summing(transcript_id, target_species_hit, cpg_obsexp_weights_dict, cpg_obsexp_weights_dict_keys, cpg_list):
"""
Retrieve a CpG weight score based on the CpG obs/exp of the midpoint of the
current predicted TFBS.
"""
if len(cpg_obsexp_weights_dict_keys)>0:
# retrieve locations and CpG obs/exp score for the midpoint of this predicted TFBS
# ref-point
motif_start = target_species_hit[2]
motif_end = target_species_hit[3]
motif_midpoint = (motif_end + motif_start)/2
cpg_obsexp = cpg_list[motif_midpoint][-1]
# extract the weight for the obsexp which is just less than the current obsexp
next_lesser_obsexp_index = bisect_left(cpg_obsexp_weights_dict_keys, cpg_obsexp)
if next_lesser_obsexp_index != len(cpg_obsexp_weights_dict_keys):
next_lesser_obsexp = cpg_obsexp_weights_dict_keys[next_lesser_obsexp_index]
else:
next_lesser_obsexp = cpg_obsexp_weights_dict_keys[-1]
cpg_weight = cpg_obsexp_weights_dict[next_lesser_obsexp]
return cpg_weight
else:
return 0
def clean_jaspar_names(uncleaned_jaspar_ids):
"""
Clean names of jaspar transcription factor names.
MSX3 <- lost in humans.
RHOX11 <- only present in 3 species.
DUX <- mouse only gene.
EWSR1 <- didn't end up in the Ensembl BioMart export.
MIX-A <- jaspar says present in xenopus laevis, but not even showing
in Ensembl as a gene for any species.
"""
special_dict = {"EWSR1-FLI1" : ["EWSR1","FLI1"]}
names_list = []
# split the combined names
for uncleaned_jaspar_id in uncleaned_jaspar_ids:
uncleaned_jaspar_id = uncleaned_jaspar_id.upper()
split_names = uncleaned_jaspar_id.split("::")
for name in split_names:
names_list.append(name)
# replace variants
for i, name in enumerate(names_list):
names_list[i] = name.replace("(VAR.2)","").replace("(VAR.3)","")
tmp_list = []
for i, name in enumerate(names_list):
if name in special_dict:
tmp_list += special_dict[name]
else:
tmp_list.append(name)
names_list = list(set(tmp_list))
names_list.sort()
return names_list
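# Example (hypothetical input): clean_jaspar_names(["EWSR1-FLI1", "Pou5f1::Sox2"]) returns
# ['EWSR1', 'FLI1', 'POU5F1', 'SOX2'] (names are uppercased, split on "::",
# "(VAR.2)"/"(VAR.3)" suffixes removed, and the special-case heterodimer expanded).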
def target_species_hits_table_writer(sorted_clusters_target_species_hits_list, output_table_name):
"""
Write to table results sorted by combined affinity score.
"""
with open(output_table_name, 'wb') as output_table:
writerUS=csv.writer(output_table)
writerUS.writerow(['binding prot.', 'motif', 'strand', 'start', 'end', 'TSS-relative start', 'TSS-relative end', 'PWM score', 'p-value', 'combined\naffinity\nscore', 'combined\naffinity\nscore\np-value', 'species\nweights\nsum', 'cage\nweights\nsum', 'eqtls\nweights\nsum', 'atac\nweights\nsum', 'metacluster\nweights\nsum', 'cpg\nweight', 'corr.\nweight\nsum'])
# for all results which have passed thresholds, write full result to .csv
# ref-point
for hit in sorted_clusters_target_species_hits_list:
frame_score_pval_str = hit[8]
combined_affinity_score_pval_str = hit[10]
if ">" not in frame_score_pval_str and frame_score_pval_str != "":
if float(frame_score_pval_str) <= 0.0001:
hit[8] = "{0:.3e}".format(Decimal(frame_score_pval_str))
if ">" not in combined_affinity_score_pval_str and combined_affinity_score_pval_str != "":
if float(combined_affinity_score_pval_str) <= 0.0001:
hit[10] = "{0:.3e}".format(Decimal(combined_affinity_score_pval_str))
writerUS.writerow([str(x) for x in hit])
def sort_target_species_hits(cluster_dict):
"""
Sort target_species hits which are part of a cluster by combined affinity score.
"""
sorted_clusters_target_species_hits_list = []
for tf_name, hits in cluster_dict.iteritems():
for hit in hits:
sorted_clusters_target_species_hits_list.append([tf_name]+hit)
## sorted_clusters_target_species_hits_list += hit
# ref-point
sorted_clusters_target_species_hits_list = sorted(sorted_clusters_target_species_hits_list, key=itemgetter(9), reverse = True)
return sorted_clusters_target_species_hits_list
def top_x_greatest_hits(sorted_clusters_target_species_hits_list, top_x_tfs_count):
"""
Identify the best scoring hits up to some threshold of number of tfs.
Allows plotting more than one instance of a top tf, without increasing the total tf used count.
e.g. 3 instances of KLF4 will count as only one tf used towards the top_x_tfs_count threshold.
"""
# to keep track of how many tfs have been added
top_x_tfs = []
# to store the x greatest tfs and their locations
top_x_greatest_hits_dict = {}
# add all hits to single pool so top hits can be identified
for sorted_clusters_target_species_hit in sorted_clusters_target_species_hits_list:
# ref-point
tf_name = sorted_clusters_target_species_hit[0]
if (len(top_x_tfs) < top_x_tfs_count):
# keep track of what & how many tfs have been added
if tf_name not in top_x_tfs:
top_x_tfs.append(tf_name)
# add the hit to the top hits if the count threshold has not been met
if tf_name in top_x_greatest_hits_dict:
top_x_greatest_hits_dict[tf_name].append(sorted_clusters_target_species_hit)
else:
top_x_greatest_hits_dict[tf_name] = [sorted_clusters_target_species_hit]
return top_x_greatest_hits_dict
################################################################################
# Alignment Manipulation #######################################################
################################################################################
def retrieve_genome_aligned(target_species, chromosome, strand, promoter_start, promoter_end):
"""
Takes as input the target_species CCDS start position and the size of the promoter to be extracted.
Retrieves the corresponding genome-aligned regions in all orthologs.
***Alignment is no longer necessary: the use of pre-computed GERP scores means that no conservation calculation needs to be made.
Additionally, the newest implementation of the Ensembl REST alignment endpoint will not retrieve the target species sequence
if there is no alignment with other species at that location.
For example, a request for a homo_sapiens alignment from chr1:1-10,000 will only return the locations where an alignment
exists with the target species group (e.g. mammals_low). This may only exist at chr1:2000-9500.
"""
## # Retrieve alignment if alignment FASTA does not already exist
## query_type = "/alignment/block/region/"
## pre_options = target_species + "/" + chromosome + ":" + str(promoter_start) + "-" + str(promoter_end) + ":" + str(strand)
##
## if coverage == "low":
## coverage_str = "EPO_LOW_COVERAGE"
## else:
## coverage_str = "EPO"
##
## options = pre_options + "?method=" + coverage_str + ";compact=1;content-type=application/json;species_set_group=" + species_group
## alignment_decoded = ensemblrest(query_type, options, 'json', "", log=True)
##
## if 'error' not in alignment_decoded:
## # remove those entries which are computed ancestral species
## alignment = [x for x in alignment_decoded[0]['alignments'] if 'Ancestor' not in x['seq_region']]
## else:
## logging.warning(" ".join([alignment_decoded['error']]))
## if alignment_decoded['error'].lower() == "no alignment available for this region":
query_type = "/sequence/region/"
pre_options = target_species + "/" + chromosome + ":" + str(promoter_start) + "-" + str(promoter_end) + ":" + str(strand)
options = pre_options + "?content-type=application/json"
target_only_decoded = ensemblrest(query_type, options, 'json', "", log=True)
if 'seq' in target_only_decoded:
target_only_decoded['species'] = target_species
alignment = [target_only_decoded]
else:
alignment = []
return alignment
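# Illustrative request (hypothetical coordinates): the function ultimately issues a GET such as
# http://rest.ensembl.org/sequence/region/homo_sapiens/1:2000-9500:1?content-type=application/json
# and wraps the returned {'seq': ...} record in a single-entry "alignment" list.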
def fasta_writer(alignment, outfile):
"""
Write ensembl JSON alignment to fasta file.
"""
if not os.path.isfile(outfile) or (os.path.isfile(outfile) and os.path.getsize(outfile) == 0):
with open(outfile, "w") as aligned_file:
for entry in alignment:
record = SeqRecord(Seq(entry['seq'], alphabet = IUPAC.ambiguous_dna), id = entry['species'], description = "")
SeqIO.write(record, aligned_file, 'fasta')
def remove_non_ACGT(alignment):
"""
Remove non-alignment characters and ambiguous nucleotides. Should consider changing to replacing any non-ACGT char with '-'.
"""
# account for sequences which are non-standard code
non_standard_dict = {'R':['A','G'],
'Y':['C','T'],
'S':['G','C'],
'W':['A','T'],
'K':['G','T'],
'M':['A','C'],
'B':['C','G','T'],
'D':['A','G','T'],
'H':['A','C','T'],
'V':['A','C','G']}
non_alignment_chars = " .N"
for entry in alignment:
for non_alignment_char in non_alignment_chars:
entry['seq'] = entry['seq'].replace(non_alignment_char, '-')
for multi_char, replacement_list in non_standard_dict.iteritems():
entry['seq'] = entry['seq'].replace(multi_char, replacement_list[0])
return alignment
def remove_gap_only(alignment):
"""
Find columns in the alignment where the entire column is '-',
replace the '-' with 'Z', then remove the 'Z' characters.
"""
if len(alignment) > 0:
for entry in alignment:
entry['seq'] = list(entry['seq'])
for i in range(0,len(alignment[0]['seq'])):
col = [x['seq'][i] for x in alignment]
if col.count('-') == len(col):
for entry in alignment:
entry['seq'][i] = 'Z'
for entry in alignment:
entry['seq'] = "".join(entry['seq']).replace(u'Z',"")
return alignment
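# Example (hypothetical alignment): for entries with seqs "A-G" and "C-T", the middle column is all '-'
# and is removed, leaving "AG" and "CT"; columns containing at least one nucleotide are kept.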
def remove_duplicate_species(alignment, target_species):
"""
If there are multiple entries for a single species in an alignment retrieved from Ensembl,
keep the one which has more ACGT characters.
"""
entry_ids = [x['species'] for x in alignment]
duplicate_ids = list(set([x for x in entry_ids if entry_ids.count(x) > 1]))
non_duplicate_alignment = [x for x in alignment if x['species'] not in duplicate_ids]
for duplicate_id in duplicate_ids:
duplicate_seqs = [x for x in alignment if x['species'] == duplicate_id]
duplicate_seqs_lens = [x['seq'].count('-') for x in duplicate_seqs]
sorted_duplicate_seqs_lens = duplicate_seqs_lens[:]
sorted_duplicate_seqs_lens.sort()
longest_seq = sorted_duplicate_seqs_lens[0]
longest_seq_index = duplicate_seqs_lens.index(longest_seq)
kept_seq = duplicate_seqs[longest_seq_index]
if duplicate_id == target_species:
non_duplicate_alignment = [kept_seq] + non_duplicate_alignment
else:
non_duplicate_alignment.append(kept_seq)
return non_duplicate_alignment
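# Illustrative behaviour (hypothetical input): of two homo_sapiens rows the one with fewer
# gaps (more ACGT) is kept and, being the target species, is moved to the front of the list.
#   >>> aln = [{'species': 'homo_sapiens', 'seq': 'AC-T'},
#   ...        {'species': 'homo_sapiens', 'seq': 'A--T'},
#   ...        {'species': 'mus_musculus', 'seq': 'ACGT'}]
#   >>> [x['seq'] for x in remove_duplicate_species(aln, 'homo_sapiens')]
#   ['AC-T', 'ACGT']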
##def selective_alignment(alignment):
## """
## Remove sequences from the alignment if they have less than 75% of the nucleotides of the target_species sequence.
## Work needed: identify scenarios where length of target sequence affects alignment and subsequent scoring.
## """
##
## target_species_entry = alignment[0]
## target_species_seq_2nd_half = target_species_entry['seq'][len(target_species_entry['seq'])/2:]
## target_species_seq_2nd_half = target_species_seq_2nd_half.replace("-","").replace("N","").replace(" ","").replace(".","")
## target_species_seq_2nd_half_len = len(target_species_seq_2nd_half)
##
## cleaned_alignment = []
## if target_species_seq_2nd_half_len > 0:
## for entry in alignment:
## entry_seq_2nd_half = entry['seq'][len(entry['seq'])/2:]
## entry_seq_2nd_half = entry_seq_2nd_half.replace("-","").replace("N","").replace(" ","").replace(".","")
## entry_seq_2nd_half_len = len(entry_seq_2nd_half)
## if float(entry_seq_2nd_half_len)/target_species_seq_2nd_half_len >= 0.75:
## cleaned_alignment.append(entry)
##
## return cleaned_alignment
def load_genome_aligned(aligned_filename):
"""
Load previously retrieved alignment fasta file into dictionary.
"""
with open(aligned_filename, 'r') as alignment_handle:
alignment_list = list(SeqIO.parse(alignment_handle, 'fasta'))
alignment = [{'seq': str(entry.seq), 'species':entry.id} for entry in alignment_list if '[' not in entry.id]
return alignment
def alignment_tools(ensembl_aligned_filename, cleaned_aligned_filename, target_species, chromosome, strand, promoter_start, promoter_end):
"""
Return cleaned alignment for further analysis.
"""
# if cleaned alignment file doesn't exist, or the size is zero.
if not os.path.isfile(cleaned_aligned_filename) or os.path.getsize(cleaned_aligned_filename) == 0:
# If uncleaned Ensembl alignment file doesn't exist, or the size is zero: retrieve from Ensembl, write to file.
if not os.path.isfile(ensembl_aligned_filename) or os.path.getsize(ensembl_aligned_filename) == 0:
alignment = retrieve_genome_aligned(target_species, chromosome, strand, promoter_start, promoter_end)
fasta_writer(alignment, ensembl_aligned_filename)
# If uncleaned Ensembl file exists and size is not zero: clean, write to cleaned filename.
if os.path.isfile(ensembl_aligned_filename) and os.path.getsize(ensembl_aligned_filename) > 0:
alignment = load_genome_aligned(ensembl_aligned_filename)
alignment = remove_non_ACGT(alignment)
alignment = remove_duplicate_species(alignment, target_species)
## # analysis is now based on conservation around individual hits, so removing sequences based on completeness is wasteful
## alignment = selective_alignment(alignment)
alignment = remove_gap_only(alignment)
fasta_writer(alignment, cleaned_aligned_filename)
# Uncleaned alignment file still doesn't exist (or size is zero): note in logfile.
else:
logging.warning(" ".join(["No ensembl alignment, or size is zero"]))
alignment = []
# Cleaned alignment file exists and size is not zero: load cleaned alignment.
else:
alignment = load_genome_aligned(cleaned_aligned_filename)
return alignment
def test_transcript_id(decoded_json_description, transcript_id):
"""
Test that the decoded JSON description of the target transcript id
indicates a transcript object and contains no errors (i.e. the retrieval was complete).
"""
transcript_id_pass = False
if 'error' not in decoded_json_description:
if 'object_type' in decoded_json_description:
if decoded_json_description['object_type'].lower() == 'transcript':
transcript_id_pass = True
else:
logging.warning(" ".join([transcript_id, "This input does not appear to be a valid Ensembl transcript ID. Ensembl REST defines it as:", decoded_json_description['object_type']]))
else:
logging.warning(" ".join([transcript_id, "This input does not appear to be a valid Ensembl transcript ID. Please check it for errors."]))
else:
logging.warning(" ".join(["Ensembl REST responds with error:", decoded_json_description['error']]))
return transcript_id_pass
def transfabulator(transcript, transcript_dict_filename):
"""
Given a transcript ID, retrieve Ensembl descriptive data for that transcript.
"""
retrieve_transcript_data = True
# load transcript position data from json file if it already exists
if os.path.isfile(transcript_dict_filename):
if os.path.getsize(transcript_dict_filename) != 0:
logging.info(" ".join(["transcript_dict already exists: loading"]))
decoded_json_description = load_json(transcript_dict_filename)
retrieve_transcript_data = False
# otherwise retrieve transcript position data from Ensembl REST, because the json file
# does not exist or contains no data.
if retrieve_transcript_data:
# Set parameters for retrieving Ensembl data via REST
query_type = '/lookup/id/'
options = '?feature=transcript;content-type=application/json'
# populate 'transcript_dict' dictionary with sub-dictionaries.
# key[transcript_id] = {chromosome, strand, start, end} for each ensembl transcript id
decoded_json_description = ensemblrest(query_type, options, 'json', transcript, log=True)
decoded_json_description = {k.lower():v for k,v in decoded_json_description.iteritems()}
return decoded_json_description
def transcript_data_retrieve(decoded_json_description, transcript_dict_filename, promoter_before_tss, promoter_after_tss):
"""
Extract the needed fields from the retrieved transcript data (assumed to be complete).
Write json data to file.
Based on user-defined values for target region (referenced to TSS),
calculate genomic coordinates of target region.
"""
# Extract position data
chromosome = decoded_json_description['seq_region_name']
chr_start = decoded_json_description['start']
chr_end = decoded_json_description['end']
strand = decoded_json_description['strand']
ens_gene_id = decoded_json_description['parent']
target_species = decoded_json_description['species']
if 'display_name' in decoded_json_description:
transcript_name = decoded_json_description['display_name']
else:
transcript_name = ""
dump_json(transcript_dict_filename, decoded_json_description)
if strand == 1:
tss = chr_start
#[promoter_start][promoter_end][TSS=chr_start][>GENE--->][chr_end]
promoter_start = tss - promoter_before_tss
promoter_end = tss - 1 + promoter_after_tss
if strand == -1:
tss = chr_end
#[chr_start][<---GENE<][TSS=chr_end][promoter_start][promoter_end]
promoter_start = tss + 1 - promoter_after_tss
promoter_end = tss + promoter_before_tss
return target_species, transcript_name, ens_gene_id, chromosome, tss, strand, promoter_start, promoter_end, chr_start, chr_end
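# Worked example (hypothetical numbers): with promoter_before_tss=100 and promoter_after_tss=10,
# a + strand transcript with chr_start=1000 gives tss=1000, promoter_start=900, promoter_end=1009;
# a - strand transcript with chr_end=1000 gives tss=1000, promoter_start=991, promoter_end=1100.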
def gene_data_retrieve(gene_dict_filename, ens_gene_id):
"""
Retrieve gene data for the parent gene of the target transcript.
"""
# determine likelihood of overlapping an eQTL at all.
# Set parameters for retrieving Ensembl data via REST
## if os.path.exists(gene_dict_filename):
decoded_json_description = load_json(gene_dict_filename)
## else:
if not decoded_json_description or len(decoded_json_description)==0:
query_type = '/lookup/id/'
options = '?feature=transcript;content-type=application/json'
# retrieve_gene_len
decoded_json_description = ensemblrest(query_type, options, 'json', ens_gene_id, log=True)
decoded_json_description = {k.lower():v for k,v in decoded_json_description.iteritems()}
dump_json(gene_dict_filename, decoded_json_description)
gene_start = decoded_json_description['start']
gene_end = decoded_json_description['end']
gene_name = decoded_json_description['display_name'].upper()
gene_len = gene_end - gene_start
return gene_name, gene_len
################################################################################
# Regulatory & Conservation Features ###########################################
################################################################################
def retrieve_regulatory(chromosome, strand, promoter_start, promoter_end, regulatory_decoded_filename, target_species):
"""
Retrieve ensembl JSON data for regulatory features within the coordinates provided.
"""
# determine if the regulatory data has already been retrieved, if so load, if not retrieve.
if os.path.isfile(regulatory_decoded_filename):
logging.info(" ".join(["regulatory_decoded already exists: loading"]))
regulatory_decoded = load_json(regulatory_decoded_filename)
else:
query_type = "/overlap/region/"
pre_options = target_species + "/" + chromosome + ":" + str(promoter_start) + "-" + str(promoter_end) + ":" + str(strand)
options = pre_options + "?feature=regulatory;content-type=application/json"
regulatory_decoded = ensemblrest(query_type, options, 'json', "", log=True)
# shorten the Ensembl regulatory element descriptions so that they don't overrun the space available for names.
for reg in regulatory_decoded:
if "description" in reg:
if reg["description"] == "Transcription factor binding site":
reg["description"] = "Pred. TFBS"
if reg["description"] == "Open chromatin region":
reg["description"] = "Open chromatin"
if reg["description"] == "Predicted promoter":
reg["description"] = "Pred. promoter"
dump_json(regulatory_decoded_filename, regulatory_decoded)
return regulatory_decoded
def reg_position_translate(tss,regulatory_decoded,promoter_start,promoter_end,strand,promoter_before_tss,promoter_after_tss):
"""
Convert positions of regulatory elements (json) to coordinates usable by the plot_promoter function.
Requires supplying location relative to TSS (i.e. negative).
For creating a left to right plot of the promoter, regardless of strand:
Converted_reg_start is the leftmost regulatory position.
Converted_reg_end is the rightmost regulatory position.
"""
converted_reg_dict = {}
for reg in regulatory_decoded:
if "error" not in reg:
reg_id = reg['id']
reg_start = reg['start']
reg_end = reg['end']
description = reg['description']
if strand == 1:
#[promoter_start][reg_start][reg_end][promoter_end][chr_start][TSS>GENE--->][chr_end]
converted_reg_start = (tss - reg_start) * -1
converted_reg_end = (tss - reg_end) * -1
if reg_start <= promoter_start:
converted_reg_start = (-1 * promoter_before_tss)
if reg_end >= promoter_end:
converted_reg_end = promoter_after_tss - 0.001
if strand == -1:
#[chr_start][<---GENE<TSS][chr_end][promoter_start][reg_start][reg_end][promoter_end]
converted_reg_start = (tss - reg_start)
converted_reg_end = (tss - reg_end)
if reg_start <= promoter_start:
converted_reg_start = promoter_after_tss - 0.001
if reg_end >= promoter_end:
converted_reg_end = (-1 * promoter_before_tss + promoter_after_tss) + 0.001
converted_reg_dict[reg_id] = {'converted_start':converted_reg_start, 'converted_end':converted_reg_end, 'description':description}
return converted_reg_dict
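# Worked example (hypothetical numbers): on the + strand with tss=1000, a regulatory feature
# spanning 950-980 converts to -50..-20 (50 to 20 nt upstream of the TSS); features extending
# past the promoter edges are clamped to the plotted range.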
##def alignment_conservation(aligned_filename):
## """
## Identify basic conservation of DNA sequence in the alignment.
## """
##
## alignment = AlignIO.read(aligned_filename, "fasta")
## target_species_row = alignment[0]
## species_num = float(len(alignment))
## conservation = []
## for i in range(0, len(target_species_row)):
##
## target_species_nuc = target_species_row[i]
## if target_species_nuc != "-":
## alignment_col = alignment[:,i]
## common_char = collections.Counter(alignment_col).most_common()[0][0]
## char_count = collections.Counter(alignment_col).most_common()[0][1]
## if common_char != '-':
## col_conservation = char_count/species_num
## else:
## col_conservation = alignment_col.count(target_species_nuc)/species_num
## conservation.append(col_conservation)
##
## return conservation
def CpG(aligned_filename):
"""
Score the CpG content of the target_species sequence over a 200 nt window.
"""
alignment = AlignIO.read(aligned_filename, "fasta")
target_species_row = alignment[0]
cpg_list = []
# per position: [1 if C, 1 if G, 1 if CpG, C+G ratio, num CpG in window, obs/exp]
for i in range(0, len(target_species_row)):
current_pos = target_species_row[i]
if current_pos != '-':
if i < len(target_species_row) - 1:
next_pos = target_species_row[i+1]
else:
next_pos = False
if current_pos == 'C' and next_pos == 'G':
cpg_list.append([1, 0, 1])
elif current_pos == 'C' and next_pos != 'G':
cpg_list.append([1, 0, 0])
elif current_pos == 'G':
cpg_list.append([0, 1, 0])
else:
cpg_list.append([0, 0, 0])
for i in range(0, len(cpg_list)):
if i < 100:
rolling_island = cpg_list[:i] + cpg_list[i:i+100]
elif i > len(cpg_list) - 100:
rolling_island = cpg_list[i-100:i] + cpg_list[i:]
else:
rolling_island = cpg_list[i-100:i+100]
Cs = sum([x[0] for x in rolling_island]) * 1.0
Gs = sum([x[1] for x in rolling_island]) * 1.0
CorG_ratio = (Cs+Gs)/len(rolling_island)
num_cpg = sum([x[2] for x in rolling_island]) * 1.0
obs = num_cpg/len(rolling_island)
exp = (CorG_ratio/2)**2
if exp==0:
exp = 0.0000000001
obs2exp = obs/exp
cpg_list[i] = cpg_list[i] + [CorG_ratio, num_cpg, obs2exp]
return cpg_list
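# Worked example (hypothetical window): in a 200 nt window containing 25 C's, 25 G's and
# 5 CpG dinucleotides, CorG_ratio = 50/200 = 0.25, obs = 5/200 = 0.025,
# exp = (0.25/2)**2 = 0.015625, so obs2exp = 1.6.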
def cage_position_translate(gene_name,transcript_id,tss,cage_dict,promoter_start,promoter_end,strand,promoter_before_tss,promoter_after_tss):
"""
Convert the CAGE data genome positions to those which can be mapped into the final figure.
"""
converted_cages = []
if gene_name in cage_dict:
cages = cage_dict[gene_name]
cages_peak_count_sum = float(sum([int(x[5]) for x in cages]))
for cage in cages:
# old
## cage_strand = cage[2]
## cage_start = cage[3]
## cage_end = cage[4]
## cage_desc = cage[6]
cage_desc = cage[1]
cage_start = int(cage[3])
cage_end = cage_start + int(cage[4])
cage_peak_count = int(cage[5])
cage_peak_count_ratio = cage_peak_count/cages_peak_count_sum
cage_strand = cage[6]
if cage_strand == "+":
#[promoter_start][cage_start][cage_end][promoter_end][chr_start][TSS>GENE--->][chr_end]
converted_cage_start = (tss - cage_start) * -1
converted_cage_end = (tss - cage_end) * -1
if cage_strand == "-":
#[chr_start][<---GENE<TSS][chr_end][promoter_start][cage_start][cage_end][promoter_end]
converted_cage_start = (tss - cage_start)
converted_cage_end = (tss - cage_end)
converted_cage = [converted_cage_start, converted_cage_end, cage_desc, cage_peak_count_ratio]
converted_cages.append(converted_cage)
converted_cages = sorted(converted_cages, key=itemgetter(2))
return converted_cages
def gtex_position_translate(ens_gene_id,gtex_variants,tss,promoter_start,promoter_end,strand,promoter_before_tss,promoter_after_tss):
"""
Convert the GTEx data genome positions to those which can be mapped into the final figure.
Reduce to those that are within range of the promoter before/after tss.
"""
converted_eqtls = []
if ens_gene_id in gtex_variants:
eqtls = gtex_variants[ens_gene_id]
for eqtl in eqtls:
if len(eqtl) == 2:
loc = eqtl[0]
eqtl_length = 1
eqtl_effect = eqtl[1]
else:
loc = eqtl[0]
eqtl_length = eqtl[1]
eqtl_effect = eqtl[2]
## overlap = overlap_range([promoter_start, promoter_end], [loc, loc+eqtl_length])
## if len(overlap) >0:
if promoter_start<=loc<=promoter_end or promoter_start<=loc+eqtl_length<=promoter_end:
#[promoter_start][eqtl_start][eqtl_end][promoter_end][chr_start][TSS>GENE--->][chr_end]
if strand == 1:
converted_eqtl_start = (tss - loc) * -1
# parenthesise the end so the eQTL end is offset by its length, matching the other *_position_translate functions
converted_eqtl_end = (tss - (loc + eqtl_length)) * -1
#[chr_start][<---GENE<TSS][chr_end][promoter_start][eqtl_start][eqtl_end][promoter_end]
if strand == -1:
converted_eqtl_start = (tss - loc)
converted_eqtl_end = (tss - (loc + eqtl_length))
# save to final list
converted_eqtl = [converted_eqtl_start, converted_eqtl_end, eqtl_effect]
converted_eqtls.append(converted_eqtl)
return converted_eqtls
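# Worked example (hypothetical numbers, using the parenthesised end position above): a
# single-nucleotide eQTL at loc=995 (eqtl_length=1) with tss=1000 on the + strand converts
# to start=-5, end=-4, i.e. just upstream of the TSS.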
def distance_solve(r1, r2):
# sort the two ranges such that the range with smaller first element
# is assigned to x and the bigger one is assigned to y
r1.sort()
r2.sort()
x, y = sorted((r1, r2))
# if x[1] is less than y[0] the ranges do not overlap,
# so return the difference between y[0] and x[1];
# otherwise the ranges overlap and the distance is 0
if x[1] < y[0]:
return y[0] - x[1]
return 0
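# Examples: distance_solve([1, 10], [15, 20]) returns 5 (the gap between the ranges);
# distance_solve([1, 10], [5, 20]) returns 0 because the ranges overlap.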
def gerp_positions_translate(target_dir, gerp_conservation_locations_dict, chromosome, strand, promoter_start, promoter_end, tss):
"""
Identify GERP constrained conservation locations which occur within the defined promoter region.
Convert positions of GERP elements (json) to coordinates usable by the plot_promoter function.
Requires supplying location relative to TSS (i.e. negative).
For creating a left to right plot of the promoter, regardless of strand:
Converted_gerp_start is the leftmost conserved-element position.
Converted_gerp_end is the rightmost conserved-element position.
"""
potential_gerps_in_promoter = []
gerps_in_promoter = []
# because a prediction can occur at the start/end of a defined promoter
extended_range = 1000
if chromosome in gerp_conservation_locations_dict:
## left_most_index = bisect_left([x[0] for x in gerp_conservation_locations_dict[chromosome]], promoter_start - extended_range)
## right_most_index = bisect_right([x[0]+x[1] for x in gerp_conservation_locations_dict[chromosome]], promoter_end + extended_range)
## potential_gerps_in_promoter = gerp_conservation_locations_dict[chromosome][left_most_index-1:right_most_index+1]
for potential_gerp_in_promoter in gerp_conservation_locations_dict[chromosome]:
## for potential_gerp_in_promoter in potential_gerps_in_promoter:
## overlap = overlap_range([promoter_start - extended_range, promoter_end + extended_range], [potential_gerp_in_promoter[0], potential_gerp_in_promoter[0]+potential_gerp_in_promoter[1]])
if promoter_start - extended_range<=potential_gerp_in_promoter[0]<=promoter_end + extended_range or promoter_start - extended_range<=potential_gerp_in_promoter[0]+potential_gerp_in_promoter[1]<=promoter_end + extended_range:
## if len(overlap) > 0:
gerps_in_promoter.append(potential_gerp_in_promoter)
# convert the positions of the in-promoter metaclusters to tss-relative
converted_gerps_in_promoter = []
for gerp_in_promoter in gerps_in_promoter:
gerp_start = gerp_in_promoter[0]
gerp_end = gerp_start + gerp_in_promoter[1]
if strand == 1:
converted_gerp_start = (tss - gerp_start) * -1
converted_gerp_end = (tss - gerp_end) * -1
if strand == -1:
converted_gerp_start = (tss - gerp_start)
converted_gerp_end = (tss - gerp_end)
converted_gerp = [converted_gerp_start, converted_gerp_end]
converted_gerps_in_promoter.append(converted_gerp)
return converted_gerps_in_promoter
def gtrd_positions_translate(target_dir, gtrd_metaclusters_dict, chromosome, strand, promoter_start, promoter_end, tss):
"""
Identify GTRD metaclusters which occur within the defined promoter region.
Convert positions of metaclusters (json) to coordinates usable by the plot_promoter function.
Requires supplying location relative to TSS (i.e. negative).
For creating a left to right plot of the promoter, regardless of strand:
Converted_metacluster_start is the leftmost metacluster position.
Converted_metacluster_end is the rightmost metacluster position.
"""
potential_metaclusters_in_promoter = []
# integer (floor) division: the metacluster dict is keyed by the millions place of the genomic position
promoter_start_millions = promoter_start//1000000
promoter_end_millions = promoter_end//1000000
# retrieve the metacluster peaks for the chromosome region in which the transcript is found;
# if the millions place is the same for both promoter ends then the metaclusters come from
# a single subdict entry
if promoter_start_millions == promoter_end_millions:
if promoter_start_millions in gtrd_metaclusters_dict:
potential_metaclusters_in_promoter += gtrd_metaclusters_dict[promoter_start_millions]
# have to account for the possibility that this location spans a millions place
# e.g. from 999,000 - 1,001,000
else:
if promoter_start_millions in gtrd_metaclusters_dict:
potential_metaclusters_in_promoter += gtrd_metaclusters_dict[promoter_start_millions]
if promoter_end_millions in gtrd_metaclusters_dict:
potential_metaclusters_in_promoter += gtrd_metaclusters_dict[promoter_end_millions]
# identify if the metacluster occurs within user-defined promoter region
metaclusters_in_promoter = []
for potential_metacluster in potential_metaclusters_in_promoter:
metacluster_start = potential_metacluster[0]
metacluster_end = metacluster_start + potential_metacluster[1]
metacluster_peak_count = potential_metacluster[2]
overlap = overlap_range([promoter_start, promoter_end], [metacluster_start, metacluster_end])
if len(overlap) > 0:
metaclusters_in_promoter.append(potential_metacluster)
# convert the positions of the in-promoter metaclusters to tss-relative
converted_metaclusters_in_promoter = []
## gtrd_outfilename = os.path.join(target_dir, os.path.basename(target_dir) + '.gtrd.txt')
## with open(gtrd_outfilename, 'w') as gtrd_outfile:
for metacluster_in_promoter in metaclusters_in_promoter:
metacluster_start = metacluster_in_promoter[0]
metacluster_end = metacluster_start + metacluster_in_promoter[1]
metacluster_peak_count = metacluster_in_promoter[2]
if strand == 1:
converted_metacluster_start = (tss - metacluster_start) * -1
converted_metacluster_end = (tss - metacluster_end) * -1
if strand == -1:
converted_metacluster_start = (tss - metacluster_start)
converted_metacluster_end = (tss - metacluster_end)
converted_metacluster = [converted_metacluster_start, converted_metacluster_end, metacluster_peak_count]
converted_metaclusters_in_promoter.append(converted_metacluster)
return converted_metaclusters_in_promoter
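# Worked example (hypothetical numbers): a promoter spanning 999,500-1,000,500 has
# promoter_start_millions=0 and promoter_end_millions=1, so candidate metaclusters are pooled
# from both the 0 and 1 sub-dictionary buckets before the overlap test above.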
def atac_pos_translate(atac_seq_dict, chromosome, strand, promoter_start, promoter_end, tss):
"""
Identify merged ATAC-Seq peaks which occur within the defined promoter region.
Convert positions of ATAC-Seq peaks (json) to coordinates usable by the plot_promoter function.
Requires supplying location relative to TSS (i.e. negative).
For creating a left to right plot of the promoter, regardless of strand:
Converted_atac_seq_start is the leftmost peak position.
Converted_atac_seq_end is the rightmost peak position.
"""
potential_atac_seqs_in_promoter = []
## chromosome = "chr" + chromosome.lower()
## if chromosome in atac_seq_dict:
# integer (floor) division: the ATAC-Seq dict is keyed by the millions place of the genomic position
promoter_start_millions = promoter_start//1000000
promoter_end_millions = promoter_end//1000000
# retrieve the ATAC-Seq peaks for the chromosome region in which the transcript is found;
# if the millions place is the same for both promoter ends then the peaks come from a single subdict entry
if promoter_start_millions == promoter_end_millions:
if promoter_start_millions in atac_seq_dict:
potential_atac_seqs_in_promoter += atac_seq_dict[promoter_start_millions]
# have to account for the possibility that this location spans a millions place
# e.g. from 999,000 - 1,001,000
else:
if promoter_start_millions in atac_seq_dict:
potential_atac_seqs_in_promoter += atac_seq_dict[promoter_start_millions]
if promoter_end_millions in atac_seq_dict:
potential_atac_seqs_in_promoter += atac_seq_dict[promoter_end_millions]
# identify if the ATAC-Seq peak occurs within user-defined promoter region
atac_seqs_in_promoter = []
for potential_atac_seq in potential_atac_seqs_in_promoter:
atac_seq_start = potential_atac_seq[0]
atac_seq_end = potential_atac_seq[0] + potential_atac_seq[1]
atac_seq_score = potential_atac_seq[2]
overlap = overlap_range([promoter_start, promoter_end], [atac_seq_start, atac_seq_end])
if len(overlap) > 0:
atac_seqs_in_promoter.append([atac_seq_start, atac_seq_end, atac_seq_score])
# convert the positions of the in-promoter atac_seqs to tss-relative
converted_atac_seqs_in_promoter = []
for atac_seq_in_promoter in atac_seqs_in_promoter:
atac_seq_start = atac_seq_in_promoter[0]
atac_seq_end = atac_seq_in_promoter[1]
atac_seq_score = atac_seq_in_promoter[2]
if strand == 1:
converted_atac_seq_start = (tss - atac_seq_start) * -1
converted_atac_seq_end = (tss - atac_seq_end) * -1
if strand == -1:
converted_atac_seq_start = (tss - atac_seq_start)
converted_atac_seq_end = (tss - atac_seq_end)
converted_atac_seq = [converted_atac_seq_start, converted_atac_seq_end, atac_seq_score]
converted_atac_seqs_in_promoter.append(converted_atac_seq)
return converted_atac_seqs_in_promoter
##def plot_promoter(target_species, transcript_id, species_group, alignment, alignment_len, promoter_before_tss, promoter_after_tss, transcript_name, top_x_greatest_hits_dict, target_dir, converted_reg_dict, converted_gerps_in_promoter, cpg_list, converted_cages, converted_metaclusters_in_promoter, converted_atac_seqs_in_promoter, converted_eqtls, cage_correlations_hit_tf_dict):
def plot_promoter(target_species, transcript_id, species_group, alignment, alignment_len, promoter_before_tss, promoter_after_tss, transcript_name, top_x_greatest_hits_dict, target_dir, converted_reg_dict, converted_gerps_in_promoter, cpg_list, converted_cages, converted_metaclusters_in_promoter, converted_atac_seqs_in_promoter, converted_eqtls):
"""
Plot the predicted TFBSs, onto a 5000 nt promoter graph, which possess support above the current strand threshold.
['binding_prot', 'species', 'motif', 'strand', 'start', 'end', 'TSS-relative start', 'TSS-relative end', 'PWM score', 'p-value', 'pos in align.', 'combined affinity score', 'support']
"""
if target_species == "homo_sapiens":
fig = plt.figure(figsize=(10, 6))
ax1 = plt.subplot2grid((20,1),(0,0), rowspan = 6, colspan = 11)
ax8 = plt.subplot2grid((20,1),(6,0), rowspan = 2, colspan = 11)
ax2 = plt.subplot2grid((20,1),(8,0), sharex=ax1, rowspan = 2, colspan = 11)
ax3 = plt.subplot2grid((20,1),(10,0), sharex=ax1, rowspan = 2, colspan = 11)
ax4 = plt.subplot2grid((20,1),(12,0), sharex=ax1, rowspan = 2, colspan = 11)
ax5 = plt.subplot2grid((20,1),(14,0), sharex=ax1, rowspan = 2, colspan = 11)
ax6 = plt.subplot2grid((20,1),(16,0), sharex=ax1, rowspan = 2, colspan = 11)
ax7 = plt.subplot2grid((20,1),(18,0), sharex=ax1, rowspan = 2, colspan = 11)
# Set format of the plot(s)
# Hide x-ticks for all plots except the lowest
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax3.get_xticklabels(), visible=False)
plt.setp(ax4.get_xticklabels(), visible=False)
plt.setp(ax5.get_xticklabels(), visible=False)
plt.setp(ax6.get_xticklabels(), visible=False)
plt.setp(ax8.get_xticklabels(), visible=False)
# plt + ax labels
ax1.text(1.02,.5,'Predicted TFBSs', verticalalignment='center', transform=ax1.transAxes, rotation='vertical', fontsize=8)
ax1.set_ylabel("Combined Affinity Score", fontsize = 8, labelpad = 0)
ax1.text(1.005,0.99,'+ strand', verticalalignment='top', transform=ax1.transAxes, rotation='vertical', fontsize=6)
ax1.text(1.005,.01,'- strand', verticalalignment='bottom', transform=ax1.transAxes, rotation='vertical', fontsize=6)
ax2.text(1.01,.5,'GERP\nConserv.\n'+species_group, verticalalignment='center', transform=ax2.transAxes, rotation='vertical', fontsize=5)
ax3.text(1.01,.5,'CpG\nObs/Exp', verticalalignment='center', transform=ax3.transAxes, rotation='vertical', fontsize=6)
ax4.text(1.01,.5,'eQTLs', verticalalignment='center', transform=ax4.transAxes, rotation='vertical', fontsize=6)
ax5.text(1.01,.5,'TFBS\nMeta\nClusters', verticalalignment='center', transform=ax5.transAxes, rotation='vertical', fontsize=6)
ax6.text(1.01,.5,'ATAC-Seq', verticalalignment='center', transform=ax6.transAxes, rotation='vertical', fontsize=6)
ax7.text(1.01,.5,'CAGE\nPeaks\n(TSSs)', verticalalignment='center', transform=ax7.transAxes, rotation='vertical', fontsize=6)
ax8.text(1.01,.5,'TF\nExpress.\nCorr.', verticalalignment='center', transform=ax8.transAxes, rotation='vertical', fontsize=6)
### as of now the data for non-human species is limited to predicted TFBSs, conservation, and CpG
else:
fig = plt.figure(figsize=(10, 6))
ax1 = plt.subplot2grid((10,1),(0,0), rowspan = 6, colspan = 11)
ax2 = plt.subplot2grid((10,1),(6,0), sharex=ax1, rowspan = 2, colspan = 11)
ax3 = plt.subplot2grid((10,1),(8,0), sharex=ax1, rowspan = 2, colspan = 11)
# Set format of the plot(s)
# Hide x-ticks for all plots except the lowest
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
# plt + ax labels
ax1.text(1.02,.5,'Predicted TFBSs', verticalalignment='center', transform=ax1.transAxes, rotation='vertical', fontsize=8)
ax1.set_ylabel("Combined Affinity Score", fontsize = 8, labelpad = 0)
ax1.text(1.005,0.99,'+ strand', verticalalignment='top', transform=ax1.transAxes, rotation='vertical', fontsize=6)
ax1.text(1.005,.01,'- strand', verticalalignment='bottom', transform=ax1.transAxes, rotation='vertical', fontsize=6)
ax2.text(1.01,.5,'GERP\nConserv.\n'+species_group, verticalalignment='center', transform=ax2.transAxes, rotation='vertical', fontsize=5)
ax3.text(1.01,.5,'CpG\nObs/Exp', verticalalignment='center', transform=ax3.transAxes, rotation='vertical', fontsize=6)
# plot title
title_str = target_species+"\n"+" ".join([transcript_name, transcript_id])
fig.text(0.065, 0.5, title_str, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes, rotation='vertical', fontsize=14)
mpl.rcParams['axes.linewidth'] = 1.1
plt.xlabel("Nucleotide position before TSS", labelpad=5)
# Generate data for each of the greatest_hits and plot corresponding bar
color_series=['#FFB300','#803E75','#FF6800','#A6BDD7','#C10020','#CEA262','#817066','#007D34','#F6768E','#00538A','#FF7A5C','#53377A','#FF8E00','#B32851','#F4C800','#7F180D','#93AA00','#593315','#F13A13','#232C16']
color_dict = {'CTCF':'#FF0000', 'TBP':'#FF00FF'}
y_range = []
labels_used = []
# Create a list sorted descending by combined affinity score, so that lower support hits that overlap can be seen.
sorted_by_ca_list = []
for TF, great_hits in top_x_greatest_hits_dict.iteritems():
for great_hit in great_hits:
sorted_by_ca_list.append(great_hit)
# ref-point
sorted_by_ca_list = sorted(sorted_by_ca_list, key=itemgetter(9), reverse=True)
### AX1: Predicted TFBSs
for sorted_great_hit in sorted_by_ca_list:
tf_name = sorted_great_hit[0]
# choose a unique color for each tf_name
if tf_name not in color_dict:
pick = numpyrandom.randint(0, len(color_series) - 1)
picked_color = color_series[pick]
color_series.remove(picked_color)
color_dict[tf_name] = picked_color
else:
picked_color = color_dict[tf_name]
# if the label has been used, set label to "", otherwise labels will repeat in legend
if tf_name in labels_used:
lab = ""
else:
lab = tf_name
labels_used.append(tf_name)
edge_color = picked_color
x_series = []
y_series = []
# ref-point
## binding_site_start = sorted_great_hit[6]
## binding_site_end = sorted_great_hit[7]
## combined_affinity = sorted_great_hit[12]
## binding_strand = int(sorted_great_hit[3])
#'binding prot.', 'motif', 'strand', 'start', 'end', 'TSS-relative start', 'TSS-relative end', 'frame score', 'p-value', 'combined\naffinity\nscore', 'species\nweights\nsum', 'cage\nweights\nsum', 'eqtls\nweights\nsum', 'atac\nweights\nsum', 'metacluster\nweights\nsum', 'cpg\nweight', 'corr.\nweight\nsum']
binding_site_start = sorted_great_hit[5]
binding_site_end = sorted_great_hit[6]
combined_affinity = sorted_great_hit[9]
binding_strand = int(sorted_great_hit[2])
TF_center_point = float(binding_site_start + binding_site_end)/2
TF_width = abs(binding_site_start - binding_site_end)
x_series.append(TF_center_point)
y_series.append(combined_affinity * binding_strand)
y_range.append(combined_affinity)
ax1.bar(x_series, y_series, facecolor = picked_color, edgecolor = edge_color, linewidth=1, alpha=0.9, align = 'center', width = TF_width, label = lab)
# Set y-axis height based on number of entries in alignment
y_range.sort()
tens_y = int(y_range[-1])/10 + 1
# Ensembl regulatory information
# All will be horizontally plotted in some shade of red
if len(converted_reg_dict) > 0:
alpha_gradient = 1.0
## alpha_gradient_dict = {1:0, 2:0.5, 3:0.25, 4:0.225}
alpha_gradient_dict = {1:0}
for i in range(2,100):
alpha_gradient_dict[i] = 1./i
reg_height = 1
reg_height = (tens_y * 1.0)/4
for reg_id, data in converted_reg_dict.iteritems():
converted_start = int(data['converted_start'])
converted_end = int(data['converted_end'])
# descriptions were already shortened in retrieve_regulatory so the legend isn't overrun
description = data['description']
reg_x_series = []
reg_y_series = []
center_point = float(converted_start + converted_end)/2
reg_x_series.append(center_point)
reg_y_series.append(reg_height)
reg_x_series.append(center_point)
reg_y_series.append(reg_height * -1)
reg_width = abs(converted_start - converted_end)
ax1.bar(reg_x_series, reg_y_series, facecolor='red', edgecolor='red', alpha=alpha_gradient, align = 'center', width=reg_width, label=description)
alpha_gradient -= alpha_gradient_dict[len(converted_reg_dict)]
reg_height += 0.5
ax1.axhline(0, color = 'black', linewidth=0.5)
### AX2: Add GERP conservation bars
for converted_gerp_in_promoter in converted_gerps_in_promoter:
converted_gerp_start = converted_gerp_in_promoter[0]
converted_gerp_end = converted_gerp_in_promoter[1]
alpha_gradient = 1
gerp_height = 1
gerp_x_series = []
gerp_y_series = []
gerp_midpoint = float(converted_gerp_start + converted_gerp_end)/2
gerp_x_series.append(gerp_midpoint)
gerp_y_series.append(gerp_height)
gerp_width = abs(converted_gerp_start - converted_gerp_end)
ax2.bar(gerp_x_series, gerp_y_series, facecolor='black', edgecolor='black', alpha=alpha_gradient, align = 'center', width=gerp_width)
ax2.set_yticks([0, 1])
plt.setp(ax2.get_yticklabels(), fontsize=6)
ax2.set_ylim(0, 1)
### AX3: CpG plot
# per position: [1 if C, 1 if G, 1 if CpG, C+G ratio, num CpG in window, obs/exp]
obs2exp = [x[5] for x in cpg_list]
ax3.plot(range(-1 * alignment_len + promoter_after_tss, promoter_after_tss), obs2exp, color = 'red')
## gpc = [x[2] for x in cpg_list]
gpc = []
top_obs2exp = ax3.get_ylim()[-1]
for x in cpg_list:
if x[2] == 0:
gpc.append(x[2])
else:
if top_obs2exp <= 1:
gpc.append(1)
else:
gpc.append(top_obs2exp)
ax3.bar(range(-1 * alignment_len + promoter_after_tss, promoter_after_tss), gpc, color = 'black')
if top_obs2exp <1:
top_obs2exp = 1
ax3.set_ylim(0, top_obs2exp)
ax3.set_yticks([0, 0.6, 1])
ax3.set_yticklabels([0, 0.6, 1], va='center')
plt.setp(ax3.get_yticklabels(), fontsize=6)
ax3.axhline(0.6, color = 'black', alpha = 0.4)
### human-specific experimental data
if target_species == "homo_sapiens":
### AX7: CAGE plot
cage_height = 1
cage_labels = []
for converted_cage in converted_cages:
converted_cage_start = converted_cage[0]
converted_cage_end = converted_cage[1]
description = converted_cage[2]
## if ".." in description:
## description = ""
cage_x_series = []
cage_y_series = []
cage_center_point = float(converted_cage_start + converted_cage_end)/2
cage_x_series.append(cage_center_point)
cage_y_series.append(cage_height)
cage_width = abs(converted_cage_start - converted_cage_end)
ax7.bar(cage_x_series, cage_y_series, facecolor='black', edgecolor='black', align = 'center', width=cage_width, label=description)
# add label for the CAGE peak
if -1 * promoter_before_tss <= converted_cage_start <= promoter_after_tss + 1 or -1 * promoter_before_tss <= converted_cage_end <= promoter_after_tss + 1:
plt.text(cage_center_point, cage_height, description, color="red", rotation = 270, fontsize=5, horizontalalignment='center', verticalalignment='top')
ax7.axes.get_yaxis().set_visible(False)
### AX5: GTRD plot
gtrd_height = 1
for converted_metacluster_in_promoter in converted_metaclusters_in_promoter:
converted_metacluster_start = converted_metacluster_in_promoter[0]
converted_metacluster_end = converted_metacluster_in_promoter[1]
metacluster_peak_count = converted_metacluster_in_promoter[2]
alpha_gradient = 0.5 + (metacluster_peak_count/1220.0)/2
gtrd_x_series = []
gtrd_y_series = []
gtrd_center_point = float(converted_metacluster_start + converted_metacluster_end)/2
gtrd_x_series.append(gtrd_center_point)
gtrd_y_series.append(gtrd_height)
gtrd_width = abs(converted_metacluster_start - converted_metacluster_end)
ax5.bar(gtrd_x_series, gtrd_y_series, facecolor='black', edgecolor='black', alpha=alpha_gradient, align = 'center', width=gtrd_width)
ax5.axes.get_yaxis().set_visible(False)
### AX6: ATAC-Seq plot
for converted_atac_seq_in_promoter in converted_atac_seqs_in_promoter:
converted_atac_seq_start = converted_atac_seq_in_promoter[0]
converted_atac_seq_end = converted_atac_seq_in_promoter[1]
atac_seq_peak_score = converted_atac_seq_in_promoter[2]
alpha_gradient = 0.5 + atac_seq_peak_score/93.234864
gtrd_x_series = []
gtrd_y_series = []
gtrd_midpoint = float(converted_atac_seq_start + converted_atac_seq_end)/2
gtrd_x_series.append(gtrd_midpoint)
gtrd_y_series.append(gtrd_height)
gtrd_width = abs(converted_atac_seq_start - converted_atac_seq_end)
ax6.bar(gtrd_x_series, gtrd_y_series, facecolor='black', edgecolor='black', alpha=alpha_gradient, align = 'center', width=gtrd_width)
ax6.axes.get_yaxis().set_visible(False)
### AX4: eQTLs plot
colors = ["green", "red"]
magnitudes = []
for converted_eqtl in converted_eqtls:
converted_eqtl_start, converted_eqtl_end, converted_eqtl_mag = converted_eqtl
if -1 * promoter_before_tss <= converted_eqtl_start <= promoter_after_tss + 1 or -1 * promoter_before_tss <= converted_eqtl_end <= promoter_after_tss + 1:
eqtl_midpoint = float(converted_eqtl_start + converted_eqtl_end)/2
eqtl_width = abs(converted_eqtl_start - converted_eqtl_end)
eqtl_x_series = []
eqtl_y_series = []
eqtl_x_series.append(eqtl_midpoint)
eqtl_y_series.append(converted_eqtl_mag)
magnitudes.append(converted_eqtl_mag)
if converted_eqtl_mag > 0:
c = colors[0]
else:
c = colors[1]
ax4.bar(eqtl_x_series, eqtl_y_series, facecolor=c, edgecolor=c, align = 'center', width=eqtl_width)
## # arrow does not format properly, perhaps due to size. y value starts not at 0, and arrow wraps over itself.
## ax4.arrow(eqtl_midpoint, 0, 0, converted_eqtl_mag, color=c, length_includes_head = True, lw=10, width=0.01)
ax4_yticks = [-1,0,1]
if len(magnitudes) > 0:
magnitudes.sort()
ax4_yticks = [math.floor(magnitudes[0]), 0, math.ceil(magnitudes[-1])]
ax4.set_yticks(ax4_yticks)
ax4.set_yticklabels(ax4_yticks, va='center')
plt.setp(ax4.get_yticklabels(), fontsize=6)
ax4.axhline(0.0, color = 'black', alpha = 0.4)
### AX8: cage_correlations
# rebuild dict to have just the top correlation
# ref-point
## plot_tfs_corrs_colors = [(tf_name, cage_correlations_hit_tf_dict[tf_name], color_dict[tf_name]) if tf_name in cage_correlations_hit_tf_dict else (tf_name, 0, color_dict[tf_name]) for tf_name in top_x_greatest_hits_dict]
plot_tfs_corrs_colors = [(tf_name, hits_list[0][16], color_dict[tf_name]) for tf_name, hits_list in top_x_greatest_hits_dict.iteritems()]
plot_tfs_corrs_colors_sorted = sorted(plot_tfs_corrs_colors, key=itemgetter(1), reverse=True)
ax8.bar(range(0, len(plot_tfs_corrs_colors_sorted)), [x[1] for x in plot_tfs_corrs_colors_sorted], color=[x[2] for x in plot_tfs_corrs_colors_sorted], edgecolor = "none")
ax8.set_ylim(0, plot_tfs_corrs_colors_sorted[0][1]+1)
ax8.set_xlim(-1, len(top_x_greatest_hits_dict))
ax8.set_yticks([0, math.ceil(plot_tfs_corrs_colors_sorted[0][1])+1])
plt.setp(ax8.get_yticklabels(), fontsize=6)
## set ticks
# based on 100's
if y_range[-1] <= 100:
# find the smallest multiple of 10 greater than the largest score (default keeps y_thresh defined when the score is exactly 100)
y_thresh = 100
for falling_y_thresh in range(100, -1, -10):
if y_range[-1] < falling_y_thresh:
y_thresh = falling_y_thresh
ax1.set_yticks(range(-1* y_thresh, y_thresh+1, 10))
else:
ax1.set_yticks(range(-1 * (((tens_y*10)/100)+1)*100, (((tens_y*10)/100)+2)*100, 100))
## if y_range[-1] <= 50:
## ax1.set_yticks(range(-50, 50+1, 10))
## else:
## ax1.set_yticks(range(-1 * (((tens_y*10)/100)+1)*100, (((tens_y*10)/100)+2)*100, 100))
ylabs=ax1.get_yticks().tolist()
ylabs=[abs(x) for x in ylabs]
ax1.set_yticklabels(ylabs)
plt.setp(ax1.get_yticklabels(), fontsize=8)
# Misc
plt.xlim([-1 * promoter_before_tss, promoter_after_tss + 1])
# legend
num_cols = 6
ax1.legend(bbox_to_anchor=[0., 1.1, 1.0, .102], loc='center', ncol=num_cols, prop={'size':8}, mode="expand", borderaxespad=0.)
# produce .svg figure
plt.subplots_adjust(hspace=0.40)
fig.savefig(os.path.join(target_dir, os.path.basename(target_dir) + '.Promoterhisto' + '.svg'), facecolor='white', bbox_inches='tight')
plt.clf()
plt.close()
## # variable x-ticks
## dist = promoter_before_tss + promoter_after_tss
## rough_interval = dist/10
## power = int(np.log10(rough_interval))
## xtick_jump = (rough_interval/(10**power)) * 10**power
## ax3.set_xticks(range(-1 * promoter_before_tss, promoter_after_tss + 1, xtick_jump))
################################################################################
# Initiating Variables #########################################################
################################################################################
################################################################################
# Execution ####################################################################
################################################################################
signal.signal(signal.SIGINT, signal_handler)
def main():
"""
All the things: run the full TFBS footprinting analysis for each supplied transcript.
"""
total_time_start = time.time()
print("Executing tfbs_footprinter version %s." % __version__)
# Create directory for results
output_dir = os.path.join(curdir, "tfbs_results")
directory_creator(output_dir)
# begin timing and logging
logging.basicConfig(filename=os.path.join(os.path.dirname(output_dir), 'TFBS_footprinter.log'), level=logging.INFO, format='%(asctime)s: [%(levelname)s] %(message)s')
logging.info(" ".join(["***NEW SET OF ANALYSES HAS BEGUN***"]))
if is_online():
args_lists, exp_data_update, nofigure = get_args()
# if experimental data dir does not exist or user has requested an exp data update, then update.
experimental_data_present = experimentalDataUpdater(exp_data_update)
if experimental_data_present:
if len(args_lists) > 0:
# analysis variables
# non-species-specific
# dictionary of thresholds for each TF
## # updated version, which requires the presence of a current versions file
## pwm_score_threshold_dict_filename = os.path.join(experimental_data_dir, current_versions["jaspar_thresholds"])
pwm_score_threshold_dict_filename = os.path.join(script_dir, 'data/all_tfs_thresholds.jaspar_2018.1.json')
pwm_score_threshold_dicta = load_json(pwm_score_threshold_dict_filename)
pwm_score_threshold_dict = {}
for k,v in pwm_score_threshold_dicta.iteritems():
pwm_score_threshold_dict[k] = {float(kk):float(vv) for kk,vv in v.iteritems()}
# load mono-nuc PFMs
TFBS_matrix_filename = os.path.join(script_dir, 'data/pwms.json')
TFBS_matrix_dict = load_json(TFBS_matrix_filename)
TFBS_matrix_dict = {k.upper():v for k,v in TFBS_matrix_dict.iteritems()}
# load JASPAR PWM score weights
all_pwms_loglikelihood_dict_filename = os.path.join(script_dir, 'data/all_pwms_loglikelihood_dict.reduced.msg')
all_pwms_loglikelihood_dict = load_msgpack(all_pwms_loglikelihood_dict_filename)
# track the previously processed species and chromosome so species-specific data is only reloaded when needed
last_target_species = None
last_chromosome = None
for i, args_list in enumerate(args_lists):
args, transcript_ids_filename, transcript_id, target_tfs_filename, promoter_before_tss, promoter_after_tss, top_x_tfs_count, pval = args_list
print("Ensembl transcript id:", transcript_id)
logging.info(" ".join(["***ANALYSIS OF A NEW TRANSCRIPT HAS BEGUN:", transcript_id]))
logging.info(" ".join(["Arguments used in this run:", str(args_list)]))
# target dir naming
start_end = "("+"_".join([str(promoter_before_tss), str(promoter_after_tss)])+")"
target_dir_name = "_".join([transcript_id+start_end, str(pval)])
target_dir = os.path.join(output_dir, target_dir_name)
# declare all possible results filenames.
tfbss_found_dict_outfilename = os.path.join(target_dir, "TFBSs_found.all.json")
cluster_dict_filename = os.path.join(target_dir, "cluster_dict.json")
ensembl_aligned_filename = os.path.join(target_dir, "alignment_uncleaned.fasta")
cleaned_aligned_filename = os.path.join(target_dir, "alignment_cleaned.fasta")
transcript_dict_filename = os.path.join(target_dir, "transcript_dict.json")
gene_dict_filename = os.path.join(target_dir, "gene_dict.json")
regulatory_decoded_filename = os.path.join(target_dir, "regulatory_decoded.json")
sortedclusters_table_filename = os.path.join(target_dir, ".".join(["TFBSs_found", "sortedclusters", "csv"]))
# check if results have been created for this query.
required_results_filenames = [cluster_dict_filename, ensembl_aligned_filename, cleaned_aligned_filename, transcript_dict_filename, gene_dict_filename, regulatory_decoded_filename, sortedclusters_table_filename]
results_files_exist = all([os.path.exists(x) for x in required_results_filenames])
if not results_files_exist:
# identify if the target transcript id exists in Ensembl
decoded_json_description = transfabulator(transcript_id, transcript_dict_filename)
transcript_id_pass = test_transcript_id(decoded_json_description, transcript_id)
# parse target transcript id data from successful retrieval, and continue
if transcript_id_pass:
# create target output dir
directory_creator(target_dir)
logging.info(" ".join(["Results will be output to:", target_dir]))
target_species, transcript_name, ens_gene_id, chromosome, tss, strand, promoter_start, promoter_end, chr_start, chr_end = transcript_data_retrieve(decoded_json_description, transcript_dict_filename, promoter_before_tss, promoter_after_tss)
gene_name, gene_len = gene_data_retrieve(gene_dict_filename, ens_gene_id)
# species-specific
species_specific_data_dir = os.path.join(script_dir, 'data', target_species)
experimentaldata(target_species)
if target_species != last_target_species or chromosome != last_chromosome:
if os.path.exists(species_specific_data_dir):
gerp_conservation_locations_dict, gerp_conservation_weight_dict, species_group, cage_dict, TF_cage_dict, cage_dist_weights_dict, cage_correlations_dict, cage_corr_weights_dict, atac_dist_weights_dict, metacluster_overlap_weights_dict, cpg_obsexp_weights_dict, cpg_obsexp_weights_dict_keys, gtex_variants, gtex_weights_dict, gtrd_metaclusters_dict, atac_seq_dict, cas_pvalues_dict = species_specific_data(target_species, chromosome, species_specific_data_dir)
last_target_species = target_species
last_chromosome = chromosome
# load target tfs
if target_tfs_filename == "" or target_tfs_filename == None:
target_tfs_filename = None
target_tfs_list = TFBS_matrix_dict.keys()
if target_tfs_filename != None:
target_tfs_list = parse_tf_ids(target_tfs_filename)
target_tfs_list = compare_tfs_list_jaspar(target_tfs_list, TFBS_matrix_dict)
# filenames for alignment and ensembl regulatory data
alignment = alignment_tools(ensembl_aligned_filename, cleaned_aligned_filename, target_species, chromosome, strand, promoter_start, promoter_end)
# continue if there is an alignment from Ensembl, and after cleaning
if len(alignment) > 0:
target_species_row = alignment[0]
alignment_len = len(target_species_row['seq'].replace('-',''))
# retrieve regulatory
regulatory_decoded = retrieve_regulatory(chromosome, strand, promoter_start, promoter_end, regulatory_decoded_filename, target_species)
converted_reg_dict = reg_position_translate(tss,regulatory_decoded,promoter_start,promoter_end,strand,promoter_before_tss,promoter_after_tss)
# conservation
converted_gerps_in_promoter = gerp_positions_translate(target_dir, gerp_conservation_locations_dict, chromosome, strand, promoter_start, promoter_end, tss)
# identify information content of each column of the alignment
cpg_list = CpG(cleaned_aligned_filename)
# identify CAGEs in proximity to Ensembl TSS, convert for plotting
converted_cages = cage_position_translate(gene_name, transcript_id,tss,cage_dict,promoter_start,promoter_end,strand,promoter_before_tss,promoter_after_tss)
# identify eQTLs in proximity to Ensembl TSS, convert for plotting
converted_eqtls = gtex_position_translate(ens_gene_id,gtex_variants,tss,promoter_start,promoter_end,strand,promoter_before_tss,promoter_after_tss)
# GTRD metaclusters
converted_metaclusters_in_promoter = gtrd_positions_translate(target_dir, gtrd_metaclusters_dict, chromosome, strand, promoter_start, promoter_end, tss)
# ATAC-seq data
converted_atac_seqs_in_promoter = atac_pos_translate(atac_seq_dict, chromosome, strand, promoter_start, promoter_end, tss)
# create index of aligned to unaligned positions
unaligned2aligned_index_dict = unaligned2aligned_indexes(cleaned_aligned_filename)
if not (os.path.exists(cluster_dict_filename) and os.path.exists(sortedclusters_table_filename)):
# score alignment for tfbss
tfbss_found_dict = tfbs_finder(transcript_name, alignment, target_tfs_list, TFBS_matrix_dict, target_dir, pwm_score_threshold_dict, all_pwms_loglikelihood_dict, unaligned2aligned_index_dict, promoter_after_tss, pval)
# sort through scores, identify hits in target_species supported in other species
cluster_dict = find_clusters(gene_name, ens_gene_id, chr_start, chr_end, alignment, target_species, chromosome, tfbss_found_dict, cleaned_aligned_filename, converted_gerps_in_promoter, gerp_conservation_weight_dict, converted_cages, converted_metaclusters_in_promoter, converted_atac_seqs_in_promoter, converted_eqtls, gtex_weights_dict, transcript_id, cage_dict, TF_cage_dict, cage_dist_weights_dict, atac_dist_weights_dict, metacluster_overlap_weights_dict, cpg_list, cpg_obsexp_weights_dict, cpg_obsexp_weights_dict_keys, cage_correlations_dict, cage_corr_weights_dict, gtex_variants, gene_len, cas_pvalues_dict)
tfbss_found_dict.clear()
dump_json(cluster_dict_filename, cluster_dict)
else:
cluster_dict = load_json(cluster_dict_filename)
# sort the target_species hits supported by other species
sorted_clusters_target_species_hits_list = sort_target_species_hits(cluster_dict)
target_species_hits_table_writer(sorted_clusters_target_species_hits_list, sortedclusters_table_filename)
# extract the top x target_species hits supported by other species
top_x_greatest_hits_dict = top_x_greatest_hits(sorted_clusters_target_species_hits_list, top_x_tfs_count)
# plot the top x target_species hits
if not nofigure:
if len(top_x_greatest_hits_dict) > 0:
plot_promoter(target_species, transcript_id, species_group, alignment, alignment_len, promoter_before_tss, promoter_after_tss, transcript_name, top_x_greatest_hits_dict, target_dir, converted_reg_dict, converted_gerps_in_promoter, cpg_list, converted_cages, converted_metaclusters_in_promoter, converted_atac_seqs_in_promoter, converted_eqtls)
total_time_end = time.time()
logging.info(" ".join(["Total time for", str(len(args_lists)), "transcripts:", str(total_time_end - total_time_start), "seconds"]) + "\n\n")
else:
print("System does not appear to be connected to the internet. Exiting TFBS_footprinter.")
| mit |
ethankruse/kepler_orrery | diverging_map.py | 1 | 12013 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Name: colorMapCreator.py
# Purpose: Generate reasonable diverging colormaps using the technique
# presented in "Diverging Color Maps for Scientific Visualization
# (Expanded)" by Kenneth Moreland.
#
# Author: Carlo Barth
#
# Created: 22.10.2013
# Copyright: (c) 2013
#------------------------------------------------------------------------------
# main() (diverge_map) function modified by Ethan Kruse 2015
# to return a colormap directly. Also found some bugs, but am hacking around
# that for now
# Imports
import numpy as np
# =============================================================================
# ====================== The Class ColorMapCreator ============================
# =============================================================================
class ColorMapCreator:
"""
Class ColorMapCreator:
Create diverging colormaps from RGB1 to RGB2 using the method of Moreland
or a simple CIELAB-interpolation. numColors controls the number of color
values to output (odd number) and divide gives the possibility to output
RGB-values from 0.0-1.0 instead of 0-255. If a filename different than
"" is given, the colormap will be saved to this file, otherwise a simple
output using print will be given.
"""
# ======================== Global Variables ===============================
# Reference white-point D65
Xn, Yn, Zn = [95.047, 100.0, 108.883] # from Adobe Cookbook
# Transfer-matrix for the conversion of RGB to XYZ color space
transM = np.array([[0.4124564, 0.2126729, 0.0193339],
[0.3575761, 0.7151522, 0.1191920],
[0.1804375, 0.0721750, 0.9503041]])
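# Note: transM is the transpose of the usual sRGB -> XYZ matrix, so that
# np.dot(RGB_row_vector, transM) in rgb2xyz is equivalent to multiplying the
# standard matrix by an RGB column vector.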
# ============================= Functions =================================
def __init__(self, RGB1, RGB2, numColors = 33, divide = 255.,
method = "moreland", filename = ""):
# create a class variable for the number of colors
self.numColors = numColors
# assert an odd number of points
assert np.mod(numColors,2) == 1, \
"For diverging colormaps odd numbers of colors are desireable!"
# assert a known method was specified
knownMethods = ["moreland", "lab"]
assert method in knownMethods, "Unknown method was specified!"
if method == knownMethods[0]:
#generate the Msh diverging colormap
self.colorMap = self.generateColorMap(RGB1, RGB2, divide)
elif method == knownMethods[1]:
# generate the Lab diverging colormap
self.colorMap = self.generateColorMapLab(RGB1, RGB2, divide)
# print out the colormap or save it to a file named filename
if filename == "":
for c in self.colorMap:
pass
# print "{0}, {1}, {2}".format(c[0], c[1], c[2])
else:
with open(filename, "w") as f:
for c in self.colorMap:
f.write("{0}, {1}, {2}\n".format(c[0], c[1], c[2]))
#-
def rgblinear(self, RGB):
"""
Conversion from the sRGB components to RGB components with physically
linear properties.
"""
# initialize the linear RGB array
RGBlinear = np.zeros((3,))
# calculate the linear RGB values
for i,value in enumerate(RGB):
value = float(value) / 255.
if value > 0.04045 :
value = ( ( value + 0.055 ) / 1.055 ) ** 2.4
else :
value = value / 12.92
RGBlinear[i] = value * 100.
return RGBlinear
#-
def sRGB(self, RGBlinear):
"""
Back conversion from linear RGB to sRGB.
"""
# initialize the sRGB array
RGB = np.zeros((3,))
# calculate the sRGB values
for i,value in enumerate(RGBlinear):
value = float(value) / 100.
if value > 0.00313080495356037152:
value = (1.055 * np.power(value,1./2.4) ) - 0.055
else :
value = value * 12.92
RGB[i] = round(value * 255.)
return RGB
#-
def rgb2xyz(self, RGB):
"""
Conversion of RGB to XYZ using the transfer-matrix
"""
return np.dot(self.rgblinear(RGB), self.transM)
#-
def xyz2rgb(self, XYZ):
"""
Conversion of XYZ to RGB using the inverse of the transfer-matrix
"""
#return np.round(np.dot(XYZ, np.array(np.matrix(transM).I)))
return self.sRGB(np.dot(XYZ, np.array(np.matrix(self.transM).I)))
#-
def rgb2Lab(self, RGB):
"""
Conversion of RGB to CIELAB
"""
# convert RGB to XYZ
X, Y, Z = (self.rgb2xyz(RGB)).tolist()
# helper function
def f(x):
limit = 0.008856
if x> limit:
return np.power(x, 1./3.)
else:
return 7.787*x + 16./116.
# calculation of L, a and b
L = 116. * ( f(Y/self.Yn) - (16./116.) )
a = 500. * ( f(X/self.Xn) - f(Y/self.Yn) )
b = 200. * ( f(Y/self.Yn) - f(Z/self.Zn) )
return np.array([L, a, b])
#-
def Lab2rgb(self, Lab):
"""
Conversion of CIELAB to RGB
"""
# unpack the Lab-array
L, a, b = Lab.tolist()
# helper function
def finverse(x):
xlim = 0.008856
a = 7.787
b = 16./116.
ylim = a*xlim+b
if x > ylim:
return np.power(x, 3)
else:
return ( x - b ) / a
# calculation of X, Y and Z
X = self.Xn * finverse( (a/500.) + (L+16.)/116. )
Y = self.Yn * finverse( (L+16.)/116. )
Z = self.Zn * finverse( (L+16.)/116. - (b/200.) )
# conversion of XYZ to RGB
return self.xyz2rgb(np.array([X,Y,Z]))
#-
def Lab2Msh(self, Lab):
"""
Conversion of CIELAB to Msh
"""
# unpack the Lab-array
L, a, b = Lab.tolist()
# calculation of M, s and h
M = np.sqrt(np.sum(np.power(Lab, 2)))
s = np.arccos(L/M)
h = np.arctan2(b,a)
return np.array([M,s,h])
#-
def Msh2Lab(self, Msh):
"""
Conversion of Msh to CIELAB
"""
# unpack the Msh-array
M, s, h = Msh.tolist()
# calculation of L, a and b
L = M*np.cos(s)
a = M*np.sin(s)*np.cos(h)
b = M*np.sin(s)*np.sin(h)
return np.array([L,a,b])
#-
def rgb2Msh(self, RGB):
""" Direct conversion of RGB to Msh. """
return self.Lab2Msh(self.rgb2Lab(RGB))
#-
def Msh2rgb(self, Msh):
""" Direct conversion of Msh to RGB. """
return self.Lab2rgb(self.Msh2Lab(Msh))
#-
def adjustHue(self, MshSat, Munsat):
"""
Function to provide an adjusted hue when interpolating to an
unsaturated color in Msh space.
"""
# unpack the saturated Msh-array
Msat, ssat, hsat = MshSat.tolist()
if Msat >= Munsat:
return hsat
else:
hSpin = ssat * np.sqrt(Munsat**2 - Msat**2) / \
(Msat * np.sin(ssat))
if hsat > -np.pi/3:
return hsat + hSpin
else:
return hsat - hSpin
#-
def interpolateColor(self, RGB1, RGB2, interp):
"""
Interpolation algorithm to automatically create continuous diverging
color maps.
"""
# convert RGB to Msh and unpack
Msh1 = self.rgb2Msh(RGB1)
M1, s1, h1 = Msh1.tolist()
Msh2 = self.rgb2Msh(RGB2)
M2, s2, h2 = Msh2.tolist()
# If points saturated and distinct, place white in middle
if (s1>0.05) and (s2>0.05) and ( np.abs(h1-h2) > np.pi/3. ):
Mmid = max([M1, M2, 88.])
if interp < 0.5:
M2 = Mmid
s2 = 0.
h2 = 0.
interp = 2*interp
else:
M1 = Mmid
s1 = 0.
h1 = 0.
interp = 2*interp-1.
# Adjust hue of unsaturated colors
if (s1 < 0.05) and (s2 > 0.05):
h1 = self.adjustHue(np.array([M2,s2,h2]), M1)
elif (s2 < 0.05) and (s1 > 0.05):
h2 = self.adjustHue(np.array([M1,s1,h1]), M2)
# Linear interpolation on adjusted control points
MshMid = (1-interp)*np.array([M1,s1,h1]) + \
interp*np.array([M2,s2,h2])
return self.Msh2rgb(MshMid)
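# Behavioural sketch: for two saturated, well-separated endpoints such as the default
# blue (59, 76, 192) and red (180, 4, 38), interp = 0.5 falls into the "place white in
# middle" branch and returns an unsaturated colour with M >= 88, which is what puts
# near-white at the centre of the diverging map.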
#-
def generateColorMap(self, RGB1, RGB2, divide):
"""
        Generate the complete diverging color map using the Moreland technique
        from RGB1 to RGB2, placing "white" in the middle. The number of points
        is given by "numColors" (set in the constructor) and controls the
        resolution of the colormap. The parameter "divide" can be used to scale
        the whole colormap, for example to obtain float values from 0 to 1.
"""
# calculate
scalars = np.linspace(0., 1., self.numColors)
RGBs = np.zeros((self.numColors, 3))
for i,s in enumerate(scalars):
RGBs[i,:] = self.interpolateColor(RGB1, RGB2, s)
return RGBs/divide
#-
def generateColorMapLab(self, RGB1, RGB2, divide):
"""
        Generate the complete diverging color map using a linear transition in
        Lab space from Lab1 to Lab2, passing through RGB-white. The number of
        points is given by "numColors" (set in the constructor) and controls
        the resolution of the colormap. The parameter "divide" can be used to
        scale the whole colormap, for example to obtain float values from 0 to 1.
"""
# convert to Lab-space
Lab1 = self.rgb2Lab(RGB1)
Lab2 = self.rgb2Lab(RGB2)
LabWhite = np.array([100., 0., 0.])
# initialize the resulting arrays
Lab = np.zeros((self.numColors ,3))
RGBs = np.zeros((self.numColors ,3))
        N2 = int(np.floor(self.numColors/2.))  # must be an int for the slicing below
# calculate
for i in range(3):
Lab[0:N2+1, i] = np.linspace(Lab1[i], LabWhite[i], N2+1)
Lab[N2:, i] = np.linspace(LabWhite[i], Lab2[i], N2+1)
for i,l in enumerate(Lab):
RGBs[i,:] = self.Lab2rgb(l)
return RGBs/divide
#-
# =============================================================================
# ========================== The Main-Function ================================
# =============================================================================
# define the initial and final RGB-colors (low and high end of the diverging
# colormap)
def diverge_map(RGB1=np.array([59, 76, 192]), RGB2=np.array([180, 4, 38]),
numColors=101):
# create a new instance of the ColorMapCreator-class using the desired
# options
colormap = ColorMapCreator(RGB1, RGB2, numColors=numColors)
    # There are clearly some bugs, since it is possible to get values > 1
    # (e.g. with starting values RGB1 = [1,185,252], RGB2 = [220, 55, 19],
    # numColors > 3), but clipping is good enough for now
colormap.colorMap = np.clip(colormap.colorMap, 0, 1)
cdict = {'red': [], 'green': [], 'blue': []}
inds = np.linspace(0.,1.,numColors)
# create a matplotlib colormap
for ii, ind in enumerate(inds):
cdict['red'].append([ind, colormap.colorMap[ii, 0],
colormap.colorMap[ii, 0]])
cdict['green'].append([ind, colormap.colorMap[ii, 1],
colormap.colorMap[ii, 1]])
cdict['blue'].append([ind, colormap.colorMap[ii, 2],
colormap.colorMap[ii, 2]])
from matplotlib.colors import LinearSegmentedColormap
mycmap = LinearSegmentedColormap('BlueRed1', cdict)
return mycmap
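# Example usage (an illustrative sketch only): build the default blue-red
# diverging map and apply it to random data. It assumes numpy is imported
# above as np and that matplotlib is available in the environment.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    demo_cmap = diverge_map()  # default: blue (59,76,192) -> red (180,4,38), 101 colors
    demo_data = np.random.randn(32, 32)
    plt.imshow(demo_data, cmap=demo_cmap)
    plt.colorbar()
    plt.show()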
| mit |
wisfern/vnpy | vnpy/trader/gateway/tkproGateway/TradeApi/trade_api.py | 4 | 19713 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
from builtins import *
import pandas as pd
from . import utils
class EntrustOrder(object):
def __init__(self, security, action, price, size):
self.security = security
self.action = action
self.price = price
self.size = size
def set_log_dir(log_dir):
if log_dir:
try:
import jrpc
jrpc.set_log_dir(log_dir)
except Exception as e:
print("Exception", e)
class TradeApi(object):
def __init__(self, addr, use_jrpc=True, prod_type="jzts"):
"""
use_jrpc:
            True -- Use the C-version jrpc client (for jzts only)
            False -- Use the pure Python version
        prod_type:
            "jaqs" -- jrpc_msgpack_with_snappy
"jzts" -- jrpc_msgpack
"""
self._remote = None
if prod_type == "jzts":
try:
if use_jrpc:
import jrpc
self._remote = jrpc.JsonRpcClient()
else:
from . import jrpc_py
self._remote = jrpc_py.JRpcClient(data_format="msgpack")
except Exception as e:
print("Exception", e)
if not self._remote:
from . import jrpc_py
self._remote = jrpc_py.JRpcClient(data_format="msgpack")
else:
from . import jrpc_py
self._remote = jrpc_py.JRpcClient(data_format="msgpack_snappy")
self._remote.on_rpc_callback = self._on_rpc_callback
self._remote.on_disconnected = self._on_disconnected
self._remote.on_connected = self._on_connected
self._remote.connect(addr)
self._ordstatus_callback = None
self._taskstatus_callback = None
self._internal_order_callback = None
self._trade_callback = None
self._on_connection_callback = None
self._connected = False
self._username = ""
self._password = ""
self._strategy_id = 0
self._strategy_selected = False
self._data_format = "default"
def __del__(self):
self._remote.close()
def _on_rpc_callback(self, method, data):
# print "_on_rpc_callback:", method, data
if method == "oms.orderstatus_ind":
if self._data_format == "obj":
data = utils.to_obj("Order", data)
if self._ordstatus_callback:
self._ordstatus_callback(data)
elif method == "oms.taskstatus_ind":
if self._data_format == "obj":
data = utils.to_obj("TaskStatus", data)
if self._taskstatus_callback:
self._taskstatus_callback(data)
elif method == "oms.trade_ind":
if self._data_format == "obj":
data = utils.to_obj("Trade", data)
if self._trade_callback:
self._trade_callback(data)
elif method == "oms.internal_order_ind":
if self._data_format == "obj":
data = utils.to_obj("QuoteOrder", data)
if self._internal_order_callback:
self._internal_order_callback(data)
def _on_disconnected(self):
print("TradeApi: _on_disconnected")
self._connected = False
self._strategy_selected = False
if self._on_connection_callback:
self._on_connection_callback(False)
def _on_connected(self):
print("TradeApi: _on_connected")
self._connected = True
self._do_login()
self._do_use_strategy()
if self._on_connection_callback:
self._on_connection_callback(True)
def _check_session(self):
if not self._connected:
return (False, "no connection")
if self._strategy_selected:
return (True, "")
r, msg = self._do_login()
if not r: return (r, msg)
if self._strategy_id:
return self._do_use_strategy()
else:
return (r, msg)
def set_data_format(self, format):
self._data_format = format
def set_connection_callback(self, callback):
self._on_connection_callback = callback
def set_ordstatus_callback(self, callback):
self._ordstatus_callback = callback
def set_trade_callback(self, callback):
self._trade_callback = callback
def set_task_callback(self, callback):
self._taskstatus_callback = callback
def set_quoteorder_callback(self, callback):
self._internal_order_callback = callback
def _get_format(self, format, default_format):
if format:
return format
elif self._data_format != "default":
return self._data_format
else:
return default_format
def login(self, username, password, format=""):
self._username = username
self._password = password
return self._do_login(format=format)
def _do_login(self, format=""):
        # Shouldn't check the connected flag here. ZMQ is a message queue!
# if !self._connected :
# return (False, "-1,no connection")
if self._username and self._password:
rpc_params = {"username": self._username,
"password": self._password}
cr = self._remote.call("auth.login", rpc_params)
f = self._get_format(format, "")
if f != "obj" and f != "":
f = ""
return utils.extract_result(cr, format=f, class_name="UserInfo")
else:
return (False, "-1,empty username or password")
def logout(self):
rpc_params = {}
cr = self._remote.call("auth.logout", rpc_params)
return utils.extract_result(cr)
def close(self):
self._remote.close()
def use_strategy(self, strategy_id):
if strategy_id:
self._strategy_id = strategy_id
return self._do_use_strategy()
else:
# Query
rpc_params = {"account_id": 0}
cr = self._remote.call("auth.use_strategy", rpc_params)
r, msg = utils.extract_result(cr)
self._strategy_selected = r
return (r, msg)
def _do_use_strategy(self):
if self._strategy_id:
rpc_params = {"account_id": self._strategy_id}
cr = self._remote.call("auth.use_strategy", rpc_params)
r, msg = utils.extract_result(cr)
self._strategy_selected = r
return (r, msg)
else:
return (False, "-1,no strategy_id was specified")
def confirm_internal_order(self, task_id, confirmed):
"""
return (result, message)
if result is None, message contains error information
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"task_id": task_id,
"confirmed": confirmed}
cr = self._remote.call("oms.confirm_internal_order", rpc_params)
return utils.extract_result(cr)
def order(self, security, price, size, algo="", algo_param={}, userdata=""):
"""
return (result, message)
if result is None, message contains error information
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"security": security,
"price": price,
"size": int(size),
"algo": algo,
"algo_param": json.dumps(algo_param),
"user": self._username,
"userdata": userdata}
cr = self._remote.call("oms.order", rpc_params)
return utils.extract_result(cr)
def place_order(self, security, action, price, size, algo="", algo_param={}, userdata=""):
"""
return (result, message)
if result is None, message contains error information
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"security": security,
"action": action,
"price": price,
"size": int(size),
"algo": algo,
"algo_param": json.dumps(algo_param),
"user": self._username,
"userdata": userdata}
cr = self._remote.call("oms.place_order", rpc_params)
return utils.extract_result(cr)
def batch_order(self, orders, algo="", algo_param={}, userdata=""):
"""
orders format:
[ {"security": "000001.SZ", "action": "Buy", "price": 10.0, "size" : 100}, ... ]
return (result, message)
if result is None, message contains error information
"""
if not orders or not isinstance(orders, (list, tuple)):
return (None, "empty order")
if isinstance(orders[0], EntrustOrder):
tmp = []
for o in orders:
tmp.append({"security": o.security,
"price": o.price,
"size": int(o.size)})
orders = tmp
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"orders": orders,
"algo": algo,
"algo_param": json.dumps(algo_param),
"user": self._username,
"userdata": userdata}
cr = self._remote.call("oms.batch_order", rpc_params)
return utils.extract_result(cr)
def place_batch_order(self, orders, algo="", algo_param={}, userdata=""):
"""
orders format:
[ {"security": "000001.SZ", "action": "Buy", "price": 10.0, "size" : 100}, ... ]
return (result, message)
if result is None, message contains error information
"""
if not orders or not isinstance(orders, (list, tuple)):
return (None, "empty order")
if isinstance(orders[0], EntrustOrder):
tmp = []
for o in orders:
tmp.append({"security": o.security,
"action": o.action,
"price": o.price,
"size": int(o.size)})
orders = tmp
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"orders": orders,
"algo": algo,
"algo_param": json.dumps(algo_param),
"user": self._username,
"userdata": userdata}
cr = self._remote.call("oms.place_batch_order", rpc_params)
return utils.extract_result(cr)
def cancel_order(self, task_id):
"""
return (result, message)
if result is None, message contains error information
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"task_id": task_id}
cr = self._remote.call("oms.cancel_order", rpc_params)
return utils.extract_result(cr)
def query_account(self, format=""):
"""
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("oms.query_account", rpc_params)
return utils.extract_result(cr, format=data_format, class_name="Account")
def query_position(self, mode="all", securities="", format=""):
"""
        securities: security codes separated by ","
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"mode": mode,
"security": securities}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("oms.query_position", rpc_params)
return utils.extract_result(cr, format=data_format, class_name="Position")
def query_net_position(self, mode="all", securities="", format=""):
"""
        securities: security codes separated by ","
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"mode": mode,
"security": securities}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("oms.query_net_position", rpc_params)
return utils.extract_result(cr, format=data_format, class_name="NetPosition")
def query_repo_contract(self, format=""):
"""
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {}
cr = self._remote.call("oms.query_repo_contract", rpc_params)
return utils.extract_result(cr, format=self._get_format(format, "pandas"), class_name="RepoContract")
def query_task(self, task_id=-1, format=""):
"""
task_id: -1 -- all
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"task_id": task_id}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("oms.query_task", rpc_params)
return utils.extract_result(cr, format=data_format, class_name="Task")
def query_order(self, task_id=-1, format=""):
"""
task_id: -1 -- all
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"task_id": task_id}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("oms.query_order", rpc_params)
return utils.extract_result(cr, format=data_format, class_name="Order")
def query_trade(self, task_id=-1, format=""):
"""
task_id: -1 -- all
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {"task_id": task_id}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("oms.query_trade", rpc_params)
return utils.extract_result(cr, format=data_format, class_name="Trade")
def query_portfolio(self, format=""):
"""
return pd.dataframe
"""
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("pms.query_portfolio", rpc_params)
return utils.extract_result(cr, index_column="security", format=data_format, class_name="NetPosition")
def goal_portfolio(self, positions, algo="", algo_param={}, userdata=""):
"""
positions format:
[ {"security": "000001.SZ", "ref_price": 10.0, "size" : 100}, ...]
return (result, message)
if result is None, message contains error information
"""
r, msg = self._check_session()
if not r: return (False, msg)
if type(positions) is pd.core.frame.DataFrame:
tmp = []
for i in range(0, len(positions)):
tmp.append({'security': positions.index[i], 'ref_price': float(positions['ref_price'][i]),
"size": int(positions['size'][i])})
positions = tmp
rpc_params = {"positions": positions,
"algo": algo,
"algo_param": json.dumps(algo_param),
"user": self._username,
"userdata": userdata}
cr = self._remote.call("pms.goal_portfolio", rpc_params)
return utils.extract_result(cr)
def basket_order(self, orders, algo="", algo_param={}, userdata=""):
"""
orders format:
[ {"security": "000001.SZ", "ref_price": 10.0, "inc_size" : 100}, ...]
return (result, message)
if result is None, message contains error information
"""
r, msg = self._check_session()
if not r: return (False, msg)
if type(orders) is pd.core.frame.DataFrame:
tmp = []
for i in range(0, len(orders)):
tmp.append({'security': orders.index[i], 'ref_price': float(orders['ref_price'][i]),
"inc_size": int(orders['inc_size'][i])})
orders = tmp
rpc_params = {"orders": orders,
"algo": algo,
"algo_param": json.dumps(algo_param),
"user": self._username,
"userdata": userdata}
cr = self._remote.call("pms.basket_order", rpc_params)
return utils.extract_result(cr)
def stop_portfolio(self):
"""
return (result, message)
if result is None, message contains error information
"""
r, msg = self._check_session()
if not r: return (False, msg)
rpc_params = {}
cr = self._remote.call("pms.stop_portfolio", rpc_params)
return utils.extract_result(cr)
def query_universe(self, format=""):
r, msg = self._check_session()
if not r: return (None, msg)
rpc_params = {}
data_format = self._get_format(format, "pandas")
if data_format == "pandas":
rpc_params["format"] = "columnset"
cr = self._remote.call("oms.query_universe", rpc_params)
return utils.extract_result(cr, format=data_format, class_name="UniverseItem")
def set_heartbeat(self, interval, timeout):
self._remote.set_hearbeat_options(interval, timeout)
print("heartbeat_interval =", self._remote._heartbeat_interval, ", heartbeat_timeout =",
self._remote._heartbeat_timeout)
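# Example usage (an illustrative sketch only; the address, credentials and
# strategy id below are placeholders). It follows the call sequence documented
# above: connect, login, select a strategy, then place and query orders.
# Every call returns a (result, message) tuple; result is None/False on error.
if __name__ == "__main__":
    api = TradeApi(addr="tcp://127.0.0.1:8901", use_jrpc=False, prod_type="jzts")
    user_info, msg = api.login("demo_user", "demo_password")
    if user_info:
        api.use_strategy(1)
        # single order
        result, msg = api.place_order("000001.SZ", "Buy", 10.0, 100)
        # batch of orders, either as dicts or EntrustOrder objects
        orders = [EntrustOrder("000001.SZ", "Buy", 10.0, 100),
                  EntrustOrder("600036.SH", "Sell", 30.0, 200)]
        result, msg = api.place_batch_order(orders)
        # query current orders / positions as pandas DataFrames
        df_orders, msg = api.query_order()
        df_pos, msg = api.query_position()
    api.close()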
| mit |
JVillella/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
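# Illustrative note (not a test): the model functions above follow the
# tf.contrib.learn model_fn contract, mapping
# (features, labels[, mode][, params]) -> (predictions, loss, train_op),
# and plug into an Estimator together with an input_fn, e.g.:
#
#   est = estimator.Estimator(model_fn=linear_model_fn)
#   est.fit(input_fn=boston_input_fn, steps=1)
#   scores = est.evaluate(input_fn=boston_eval_fn, steps=1)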
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
catalyst-cooperative/pudl | test/unit/helpers_test.py | 1 | 2259 | """Unit tests for the :mod:`pudl.helpers` module."""
import pandas as pd
from pandas.testing import assert_frame_equal
import pudl
def test_convert_to_date():
"""Test automated cleanup of EIA date columns."""
data = [
(2019, 3, 14),
("2019", "03", "14"),
]
in_df = pd.DataFrame.from_records(
data, columns=["report_year", "report_month", "report_day"]
)
expected_df = pd.DataFrame({
"report_date": pd.to_datetime([
"2019-03-14",
"2019-03-14",
]),
})
out_df = pudl.helpers.convert_to_date(in_df)
assert_frame_equal(out_df, expected_df)
def test_fix_eia_na():
"""Test cleanup of bad EIA spreadsheet NA values."""
in_df = pd.DataFrame({
"vals": [
"",
" ",
"\t",
".",
".0", # Should only replace naked decimals
"..", # Only single naked decimals?
" ", # 2 spaces -- we only replace single whitespace chars?
"\t\t", # 2 tabs -- we only replace single whitespace chars?
]
})
expected_df = pd.DataFrame({
"vals": [
pd.NA,
pd.NA,
pd.NA,
pd.NA,
".0",
"..",
" ",
"\t\t",
]
})
out_df = pudl.helpers.fix_eia_na(in_df)
assert_frame_equal(out_df, expected_df)
def test_fix_leading_zero_gen_ids():
"""Test removal of leading zeroes from EIA generator IDs."""
in_df = pd.DataFrame({
"generator_id": [
"0001", # Leading zeroes, all numeric string.
"26", # An appropriate numeric string w/o leading zeroes.
100, # Integer, should get stringified.
100.0, # What happens if it's a float?
"01-A", # Leading zeroes, alphanumeric. Should not change.
"HRSG-01", # Alphanumeric, should be no change.
]
})
expected_df = pd.DataFrame({
"generator_id": [
"1",
"26",
"100",
"100.0",
"01-A",
"HRSG-01",
]
})
out_df = pudl.helpers.fix_leading_zero_gen_ids(in_df)
assert_frame_equal(out_df, expected_df)
| mit |
m3wolf/xanespy | tests/test_importers.py | 1 | 48905 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Mark Wolf
#
# This file is part of Xanespy.
#
# Xanespy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xanespy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xanespy. If not, see <http://www.gnu.org/licenses/>.
# flake8: noqa
import logging
import datetime as dt
import unittest
from unittest import TestCase, mock
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
import warnings
import contextlib
import pytz
import numpy as np
import pandas as pd
import h5py
from skimage import data
import matplotlib.pyplot as plt
from xanespy import exceptions, utilities
from xanespy.xradia import XRMFile, TXRMFile
from xanespy.nanosurveyor import CXIFile, HDRFile
from xanespy.sxstm import SxstmDataFile
from xanespy.importers import (magnification_correction,
decode_aps_params, decode_ssrl_params,
import_ssrl_xanes_dir, CURRENT_VERSION,
import_nanosurveyor_frameset,
import_cosmic_frameset,
import_aps4idc_sxstm_files,
import_aps8bm_xanes_dir,
import_aps8bm_xanes_file,
import_aps32idc_xanes_files,
import_aps32idc_xanes_file,
read_metadata, minimum_shape,
rebin_image, )
from xanespy.txmstore import TXMStore
# logging.basicConfig(level=logging.DEBUG)
TEST_DIR = os.path.dirname(__file__)
SSRL_DIR = os.path.join(TEST_DIR, 'txm-data-ssrl')
APS_DIR = os.path.join(TEST_DIR, 'txm-data-aps')
APS32_DIR = os.path.join(TEST_DIR, 'txm-data-32-idc')
PTYCHO_DIR = os.path.join(TEST_DIR, 'ptycho-data-als/NS_160406074')
COSMIC_DIR = os.path.join(TEST_DIR, 'ptycho-data-cosmic')
SXSTM_DIR = os.path.join(TEST_DIR, "sxstm-data-4idc/")
class APS32IDCImportTest(TestCase):
    """Check that the program can import XANES data from an APS 32-ID-C
    HDF5 file."""
src_data = os.path.join(APS32_DIR, 'nca_32idc_xanes.h5')
def setUp(self):
self.hdf = os.path.join(APS32_DIR, 'testdata.h5')
if os.path.exists(self.hdf):
os.remove(self.hdf)
def tearDown(self):
if os.path.exists(self.hdf):
os.remove(self.hdf)
def test_imported_hdf(self):
# Run the import function
import_aps32idc_xanes_file(self.src_data,
hdf_filename=self.hdf, hdf_groupname='experiment1',
downsample=1)
# Check that the file was created
self.assertTrue(os.path.exists(self.hdf))
with h5py.File(self.hdf, mode='r') as f:
self.assertNotIn('experiment2', list(f.keys()))
parent_group = f['experiment1']
data_group = f['experiment1/imported']
# Check metadata about beamline
self.assertEqual(parent_group.attrs['technique'], 'Full-field TXM')
self.assertEqual(parent_group.attrs['xanespy_version'], CURRENT_VERSION)
self.assertEqual(parent_group.attrs['beamline'], "APS 32-ID-C")
self.assertEqual(parent_group.attrs['original_directory'],
os.path.dirname(self.src_data))
self.assertEqual(parent_group.attrs['latest_data_name'], 'imported')
# Check h5 data structure
keys = list(data_group.keys())
self.assertIn('intensities', keys)
self.assertTrue(np.any(data_group['intensities']))
self.assertEqual(data_group['intensities'].shape, (1, 3, 256, 256))
self.assertEqual(data_group['intensities'].attrs['context'], 'frameset')
self.assertIn('flat_fields', keys)
self.assertTrue(np.any(data_group['flat_fields']))
self.assertEqual(data_group['flat_fields'].attrs['context'], 'frameset')
self.assertIn('dark_fields', keys)
self.assertEqual(data_group['dark_fields'].shape, (1, 2, 256, 256))
self.assertTrue(np.any(data_group['dark_fields']))
self.assertEqual(data_group['dark_fields'].attrs['context'], 'frameset')
self.assertIn('optical_depths', keys)
self.assertEqual(data_group['optical_depths'].shape, (1, 3, 256, 256))
self.assertTrue(np.any(data_group['optical_depths']))
self.assertEqual(data_group['optical_depths'].attrs['context'], 'frameset')
self.assertEqual(data_group['pixel_sizes'].attrs['unit'], 'µm')
self.assertEqual(data_group['pixel_sizes'].shape, (1, 3))
            # Original pixel size is 29.99 nm; downsample=1 bins 2x2, doubling it
self.assertTrue(np.all(data_group['pixel_sizes'].value == 0.02999 * 2))
self.assertEqual(data_group['energies'].shape, (1, 3))
expected_Es = np.array([[8340, 8350, 8360]])
np.testing.assert_array_almost_equal(data_group['energies'].value,
expected_Es, decimal=3)
self.assertIn('timestamps', keys)
expected_timestamp = np.empty(shape=(1, 3, 2), dtype="S32")
expected_timestamp[...,0] = b'2016-10-07 18:24:42'
expected_timestamp[...,1] = b'2016-10-07 18:37:42'
np.testing.assert_equal(data_group['timestamps'].value,
expected_timestamp)
self.assertIn('timestep_names', keys)
self.assertEqual(data_group['timestep_names'][0], bytes("soc000", 'ascii'))
self.assertIn('filenames', keys)
self.assertEqual(data_group['filenames'].shape, (1, 3))
self.assertEqual(data_group['filenames'][0, 0], self.src_data.encode('ascii'))
# self.assertIn('original_positions', keys)
def test_exclude_frames(self):
# Run the import function
import_aps32idc_xanes_file(self.src_data,
hdf_filename=self.hdf, hdf_groupname='experiment1',
downsample=1, exclude=(1,))
# Check that the file was created
self.assertTrue(os.path.exists(self.hdf))
with h5py.File(self.hdf, mode='r') as f:
self.assertNotIn('experiment2', list(f.keys()))
parent_group = f['experiment1']
data_group = f['experiment1/imported']
self.assertEqual(data_group['intensities'].shape, (1, 2, 256, 256))
def test_limited_dark_flat(self):
# Only import some of the flat and dark field images
import_aps32idc_xanes_file(self.src_data,
hdf_filename=self.hdf, hdf_groupname='experiment1',
downsample=0, dark_idx=slice(0, 1))
# Check that the right number of files were imported
with h5py.File(self.hdf, mode='r') as f:
grp = f['experiment1/imported']
self.assertEqual(grp['dark_fields'].shape[1], 1)
def test_import_multiple_hdfs(self):
import_aps32idc_xanes_files([self.src_data, self.src_data],
hdf_filename=self.hdf, hdf_groupname='experiment1',
square=False, downsample=0)
with h5py.File(self.hdf, mode='r') as f:
g = f['/experiment1/imported']
self.assertEqual(g['intensities'].shape, (2, 3, 512, 612))
self.assertTrue(np.any(g['intensities'][0]))
self.assertTrue(np.any(g['intensities'][1]))
# They should be equal since we have import the same data twice
np.testing.assert_equal(g['intensities'][0], g['intensities'][1])
def test_import_second_hdf(self):
# Run the import function
import_aps32idc_xanes_file(self.src_data,
hdf_filename=self.hdf, hdf_groupname='experiment1',
total_timesteps=2, square=False, downsample=0)
import_aps32idc_xanes_file(self.src_data,
hdf_filename=self.hdf, hdf_groupname='experiment1',
total_timesteps=2, timestep=1, append=True, square=False,
downsample=0)
with h5py.File(self.hdf, mode='r') as f:
g = f['/experiment1/imported']
self.assertEqual(g['intensities'].shape, (2, 3, 512, 612))
self.assertTrue(np.any(g['intensities'][0]))
self.assertTrue(np.any(g['intensities'][1]))
# They should be equal since we have import the same data twice
np.testing.assert_equal(g['intensities'][0], g['intensities'][1])
class CosmicTest(TestCase):
"""Test for importing STXM and ptychography data.
From ALS Cosmic beamline. Test data taken from beamtime on
2018-11-09. The cxi file is a stripped down version of the
original (to save space). Missing crucial data should be added to
the cxi as needed.
Data
====
ptycho-scan-856eV.cxi : NS_181110188_002.cxi
stxm-scan-a003.xim : NS_181110203_a003.xim
stxm-scan-a019.xim : NS_181110203_a019.xim
stxm-scan.hdr : NS_181110203.hdr
"""
stxm_hdr = os.path.join(COSMIC_DIR, 'stxm-scan.hdr')
ptycho_cxi = os.path.join(COSMIC_DIR, 'ptycho-scan-856eV.cxi')
hdf_filename = os.path.join(COSMIC_DIR, 'cosmic-test-import.h5')
def tearDown(self):
if os.path.exists(self.hdf_filename):
os.remove(self.hdf_filename)
def test_import_partial_data(self):
"""Check if the cosmic importer works if only hdr or cxi files are
given."""
# Import only STXM images
import_cosmic_frameset(stxm_hdr=[self.stxm_hdr],
ptycho_cxi=[],
hdf_filename=self.hdf_filename)
with TXMStore(self.hdf_filename, parent_name='stxm-scan') as store:
self.assertEqual(store.data_name, 'imported')
# Import only ptycho images
import_cosmic_frameset(stxm_hdr=[],
ptycho_cxi=[self.ptycho_cxi],
hdf_filename=self.hdf_filename)
with TXMStore(self.hdf_filename, parent_name='ptycho-scan-856eV') as store:
self.assertEqual(store.data_name, 'imported')
def test_import_cosmic_data(self):
# Check that passing no data raises and exception
with self.assertRaises(ValueError):
import_cosmic_frameset(hdf_filename=self.hdf_filename)
import_cosmic_frameset(stxm_hdr=[self.stxm_hdr],
ptycho_cxi=[self.ptycho_cxi],
hdf_filename=self.hdf_filename)
# Does the HDF file exist
self.assertTrue(os.path.exists(self.hdf_filename),
"%s doesn't exist" % self.hdf_filename)
hdf_kw = dict(hdf_filename=self.hdf_filename,
parent_name='ptycho-scan-856eV',
mode='r')
# Open ptychography TXM store and check its contents
with TXMStore(**hdf_kw, data_name='imported_ptychography') as store:
# Make sure the group exists
self.assertEqual(store.data_group().name,
'/ptycho-scan-856eV/imported_ptychography')
# Check the data structure
self.assertEqual(store.filenames.shape, (1, 1))
stored_filename = store.filenames[0,0].decode('utf-8')
self.assertEqual(stored_filename, os.path.basename(self.ptycho_cxi))
np.testing.assert_equal(store.energies.value, [[855.9056362433222]])
np.testing.assert_equal(store.pixel_sizes.value, [[6.0435606480754585]])
np.testing.assert_equal(store.pixel_unit, 'nm')
self.assertEqual(store.intensities.shape, (1, 1, 285, 285))
self.assertEqual(store.optical_depths.shape, (1, 1, 285, 285))
self.assertEqual(store.timestep_names[0].decode('utf-8'), 'ex-situ')
# Open STXM TXM store and check its contents
with TXMStore(**hdf_kw, data_name='imported_stxm') as store:
# Make sure the group exists
self.assertEqual(store.data_group().name,
'/ptycho-scan-856eV/imported_stxm')
# Check the data structure
self.assertEqual(store.filenames.shape, (1, 2))
stored_filename = store.filenames[0,0].decode('utf-8')
expected_filename = os.path.join(COSMIC_DIR, 'stxm-scan_a003.xim')
self.assertEqual(stored_filename, expected_filename)
np.testing.assert_equal(store.energies.value, [[853, 857.75]])
np.testing.assert_equal(store.pixel_sizes.value, [[27.2, 27.2]])
self.assertEqual(store.intensities.shape, (1, 2, 120, 120))
self.assertEqual(store.optical_depths.shape, (1, 2, 120, 120))
self.assertEqual(store.timestep_names[0].decode('utf-8'), 'ex-situ')
# Open imported TXMStore to check its contents
with TXMStore(**hdf_kw, data_name='imported') as store:
self.assertEqual(store.filenames.shape, (1, 3))
self.assertEqual(store.timestep_names.shape, (1,))
real_px_size = 6.0435606480754585
np.testing.assert_equal(store.pixel_sizes.value,
[[real_px_size, real_px_size, real_px_size]])
self.assertEqual(store.pixel_unit, 'nm')
class CosmicFileTest(TestCase):
stxm_hdr = os.path.join(COSMIC_DIR, 'stxm-scan.hdr')
ptycho_cxi = os.path.join(COSMIC_DIR, 'ptycho-scan-856eV.cxi')
def setUp(self):
self.hdr = HDRFile(self.stxm_hdr)
self.cxi = CXIFile(self.ptycho_cxi)
def test_hdr_filenames(self):
real_filenames = [os.path.join(COSMIC_DIR, f) for f in
('stxm-scan_a003.xim', 'stxm-scan_a019.xim')]
self.assertEqual(self.hdr.filenames(), real_filenames)
def test_cxi_filenames(self):
self.assertEqual(self.cxi.filenames(), ['ptycho-scan-856eV.cxi'])
def test_cxi_image_data(self):
with self.cxi:
self.assertEqual(self.cxi.num_images(), 1)
self.assertEqual(self.cxi.image_frames().shape, (1, 285, 285))
def test_cxi_image_shape(self):
with self.cxi:
self.assertEqual(self.cxi.image_shape(), (285, 285))
def test_cxi_energies(self):
with self.cxi:
self.assertAlmostEqual(self.cxi.energies()[0], 855.9056, places=3)
def test_cxi_pixel_size(self):
real_px_size = 6.0435606480754585
with self.cxi:
self.assertAlmostEqual(self.cxi.pixel_size(), real_px_size)
def test_hdr_pixel_size(self):
with self.hdr:
self.assertEqual(self.hdr.pixel_size(), 27.2)
def test_hdr_image_data(self):
self.assertEqual(self.hdr.num_images(), 2)
self.assertEqual(self.hdr.image_frames().shape, (2, 120, 120))
def test_hdr_image_shape(self):
self.assertEqual(self.hdr.image_shape(), (120, 120))
def test_hdr_energies(self):
with self.hdr:
self.assertAlmostEqual(self.hdr.energies()[0], 853., places=3)
def test_specific_hdr_files(self):
"""This test check specific HDR files that did not succeed at first.
"""
# This one has a negative sign in front of the x-position
filename1 = os.path.join(COSMIC_DIR, 'NS_181111148.hdr')
hdr1 = HDRFile(filename1)
self.assertAlmostEqual(hdr1.pixel_size(), 66.7)
class XradiaTest(TestCase):
txrm_filename = os.path.join(TEST_DIR, "aps-8BM-sample.txrm")
def test_pixel_size(self):
sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
self.assertAlmostEqual(xrm.um_per_pixel(), 0.03287, places=4)
def test_timestamp_from_xrm(self):
pacific_tz = pytz.timezone("US/Pacific")
chicago_tz = pytz.timezone('US/Central')
sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
# Check start time
start = pacific_tz.localize(dt.datetime(2016, 5, 29, 15, 2, 37))
start = start.astimezone(pytz.utc).replace(tzinfo=None)
self.assertEqual(xrm.starttime(), start)
self.assertEqual(xrm.starttime().tzinfo, None)
# Check end time (offset determined by exposure time)
end = pacific_tz.localize(dt.datetime(2016, 5, 29, 15, 2, 37, 500000))
end = end.astimezone(pytz.utc).replace(tzinfo=None)
self.assertEqual(xrm.endtime(), end)
xrm.close()
# Test APS frame
sample_filename = "fov03_xanesocv_8353_0eV.xrm"
with XRMFile(os.path.join(APS_DIR, sample_filename), flavor="aps") as xrm:
# Check start time
start = chicago_tz.localize(dt.datetime(2016, 7, 2, 17, 50, 35))
start = start.astimezone(pytz.utc).replace(tzinfo=None)
self.assertEqual(xrm.starttime(), start)
# Check end time (offset determined by exposure time)
end = chicago_tz.localize(dt.datetime(2016, 7, 2, 17, 51, 25))
end = end.astimezone(pytz.utc).replace(tzinfo=None)
self.assertEqual(xrm.endtime(), end)
def test_mosaic(self):
# txm-2015-11-11-aps/ncm111-cell1-chargeC15/20151111_002_mosaic_5x5_bin8_5s.xrm
mosaic_filename = 'mosaic_4x4_bin8.xrm'
with XRMFile(os.path.join(TEST_DIR, mosaic_filename), flavor='aps') as xrm:
img_data = xrm.image_data()
# Check basic shape details
self.assertEqual(img_data.shape, (1024, 1024))
self.assertEqual(xrm.mosaic_columns, 4)
self.assertEqual(xrm.mosaic_rows, 4)
self.assertEqual(xrm.um_per_pixel(), 0.15578947961330414)
def test_str_and_repr(self):
sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
self.assertEqual(repr(xrm), "<XRMFile: '{}'>".format(sample_filename))
self.assertEqual(str(xrm), "<XRMFile: '{}'>".format(sample_filename))
def test_binning(self):
sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
self.assertEqual(xrm.binning(), (2, 2))
def test_frame_stack(self):
with TXRMFile(self.txrm_filename, flavor="aps") as txrm:
self.assertEqual(txrm.image_stack().shape, (3, 1024, 1024))
self.assertEqual(txrm.energies().shape, (3,))
def test_num_images(self):
with TXRMFile(self.txrm_filename, flavor="aps") as txrm:
self.assertEqual(txrm.num_images(), 3)
def test_starttimes(self):
with TXRMFile(self.txrm_filename, flavor="aps") as txrm:
result = txrm.starttimes()
expected_start = dt.datetime(2017, 7, 9, 0, 49, 2)
self.assertEqual(result[0], expected_start)
class PtychographyImportTest(TestCase):
def setUp(self):
self.hdf = os.path.join(PTYCHO_DIR, 'testdata.h5')
if os.path.exists(self.hdf):
os.remove(self.hdf)
def tearDown(self):
if os.path.exists(self.hdf):
os.remove(self.hdf)
def test_directory_names(self):
"""Tests for checking some of the edge cases for what can be passed as
a directory string."""
import_nanosurveyor_frameset(PTYCHO_DIR + "/", hdf_filename=self.hdf)
def test_imported_hdf(self):
import_nanosurveyor_frameset(PTYCHO_DIR, hdf_filename=self.hdf)
self.assertTrue(os.path.exists(self.hdf))
with h5py.File(self.hdf, mode='r') as f:
dataset_name = 'NS_160406074'
parent = f[dataset_name]
# Check metadata about the sample
self.assertEqual(parent.attrs['latest_data_name'], "imported")
group = parent['imported']
keys = list(group.keys())
# Check metadata about beamline
self.assertEqual(parent.attrs['technique'], 'ptychography STXM')
# Check data is structured properly
self.assertEqual(group['timestep_names'].value[0], bytes(dataset_name, 'ascii'))
self.assertIn('intensities', keys)
self.assertEqual(group['intensities'].shape, (1, 3, 228, 228))
self.assertEqual(group['intensities'].attrs['context'], 'frameset')
self.assertIn('stxm', keys)
self.assertEqual(group['stxm'].shape, (1, 3, 20, 20))
self.assertEqual(group['pixel_sizes'].attrs['unit'], 'nm')
self.assertTrue(np.all(group['pixel_sizes'].value == 4.16667),
msg=group['pixel_sizes'].value)
self.assertEqual(group['pixel_sizes'].shape, (1, 3))
expected_Es = np.array([[843.9069591, 847.90651815,
850.15627011]])
np.testing.assert_allclose(group['energies'].value, expected_Es)
self.assertEqual(group['energies'].shape, (1, 3))
## NB: Timestamps not available in the cxi files
# self.assertIn('timestamps', keys)
# expected_timestamp = np.array([
# [[b'2016-07-02 16:31:36-05:51', b'2016-07-02 16:32:26-05:51'],
# [b'2016-07-02 17:50:35-05:51', b'2016-07-02 17:51:25-05:51']],
# [[b'2016-07-02 22:19:23-05:51', b'2016-07-02 22:19:58-05:51'],
# [b'2016-07-02 23:21:21-05:51', b'2016-07-02 23:21:56-05:51']],
# ], dtype="S32")
# self.assertTrue(np.array_equal(group['timestamps'].value,
# expected_timestamp))
self.assertIn('filenames', keys)
self.assertEqual(group['filenames'].shape, (1, 3))
self.assertIn('relative_positions', keys)
self.assertEqual(group['relative_positions'].shape, (1, 3, 3))
## NB: It's not clear exactly what "original positions"
## means for STXM data
self.assertIn('original_positions', keys)
self.assertEqual(group['original_positions'].shape, (1, 3, 3))
def test_frame_shape(self):
"""In some cases, frames are different shapes. Specifying a shape in
the importer can fix this.
"""
expected_shape = (220, 220)
import_nanosurveyor_frameset(PTYCHO_DIR,
hdf_filename=self.hdf,
frame_shape=expected_shape)
with h5py.File(self.hdf, mode='r') as f:
real_shape = f['NS_160406074/imported/intensities'].shape
self.assertEqual(real_shape, (1, 3, *expected_shape))
def test_partial_import(self):
"""Sometimes the user may want to specify that only a subset of
ptychographs be imported.
"""
energy_range = (843, 848)
import_nanosurveyor_frameset(PTYCHO_DIR,
energy_range=energy_range,
hdf_filename=self.hdf, quiet=True)
with h5py.File(self.hdf, mode='r') as f:
dataset_name = 'NS_160406074'
parent = f[dataset_name]
group = parent['imported']
self.assertEqual(group['intensities'].shape[0:2],
(1, 2))
self.assertEqual(group['filenames'].shape, (1, 2))
def test_exclude_re(self):
"""Allow the user to exclude specific frames that are bad."""
import_nanosurveyor_frameset(PTYCHO_DIR,
exclude_re="(/009/|/100/)",
hdf_filename=self.hdf, quiet=True)
with h5py.File(self.hdf, mode='r') as f:
dataset_name = 'NS_160406074'
parent = f[dataset_name]
group = parent['imported']
self.assertEqual(group['intensities'].shape[0:2],
(1, 2))
def test_multiple_import(self):
"""Check if we can import multiple different directories of different
energies ranges."""
# Import two data sets (order is important to test for sorting)
import_nanosurveyor_frameset("{}-low-energy".format(PTYCHO_DIR),
hdf_filename=self.hdf, quiet=True,
hdf_groupname="merged")
import_nanosurveyor_frameset("{}-high-energy".format(PTYCHO_DIR),
hdf_filename=self.hdf, quiet=True,
hdf_groupname="merged",
append=True)
# Check resulting HDF5 file
with h5py.File(self.hdf) as f:
self.assertIn('merged', f.keys())
# Check that things are ordered by energy
saved_Es = f['/merged/imported/energies'].value
np.testing.assert_array_equal(saved_Es, np.sort(saved_Es))
# Construct the expected path relative to the current directory
relpath = "ptycho-data-als/NS_160406074-{}-energy/160406074/{}/NS_160406074.cxi"
toplevel = os.getcwd().split('/')[-1]
if toplevel == "tests":
test_dir = ''
else:
test_dir = 'tests'
relpath = os.path.join(test_dir, relpath)
# Compare the expeected file names
sorted_files = [[bytes(relpath.format("low", "001"), 'ascii'),
bytes(relpath.format("low", "009"), 'ascii'),
bytes(relpath.format("high", "021"), 'ascii'),]]
saved_files = f['/merged/imported/filenames']
np.testing.assert_array_equal(saved_files, sorted_files)
class APS8BMFileImportTest(TestCase):
txrm_file = os.path.join(TEST_DIR, 'aps-8BM-sample.txrm')
txrm_ref = os.path.join(TEST_DIR, 'aps-8BM-reference.txrm')
def setUp(self):
self.hdf = os.path.join(APS_DIR, 'testdata.h5')
if os.path.exists(self.hdf):
os.remove(self.hdf)
def tearDown(self):
if os.path.exists(self.hdf):
os.remove(self.hdf)
def test_imported_hdf(self):
import_aps8bm_xanes_file(self.txrm_file,
ref_filename=self.txrm_ref, hdf_filename=self.hdf,
quiet=True)
# Check that the file was created
self.assertTrue(os.path.exists(self.hdf))
with h5py.File(self.hdf, mode='r') as f:
group = f['aps-8BM-sample/imported']
parent = f['aps-8BM-sample']
# Check metadata about beamline
self.assertEqual(parent.attrs['technique'], 'Full-field TXM')
self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
self.assertEqual(parent.attrs['beamline'], "APS 8-BM-B")
self.assertEqual(parent.attrs['original_file'], self.txrm_file)
# Check h5 data structure
keys = list(group.keys())
self.assertIn('intensities', keys)
self.assertEqual(group['intensities'].shape, (1, 3, 1024, 1024))
self.assertIn('references', keys)
self.assertIn('optical_depths', keys)
self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
self.assertEqual(group['pixel_sizes'].shape, (1, 3))
self.assertTrue(np.any(group['pixel_sizes'].value > 0))
expected_Es = np.array([[8312.9287109, 8363.0078125, 8412.9541016]])
np.testing.assert_almost_equal(group['energies'].value, expected_Es)
self.assertIn('timestamps', keys)
expected_timestamp = np.array([
[b'2017-07-09 00:49:02', b'2017-07-09 00:49:30', b'2017-07-09 00:49:58'],
], dtype="S32")
np.testing.assert_equal(group['timestamps'].value,
expected_timestamp)
self.assertIn('filenames', keys)
self.assertIn('original_positions', keys)
self.assertEqual(group['original_positions'].shape, (1, 3, 3))
class APS8BMDirImportTest(TestCase):
    """Check that the program can import a collection of APS 8-BM frames from
    a directory."""
def setUp(self):
self.hdf = os.path.join(APS_DIR, 'testdata.h5')
if os.path.exists(self.hdf):
os.remove(self.hdf)
def tearDown(self):
if os.path.exists(self.hdf):
os.remove(self.hdf)
def test_import_empty_directory(self):
"""Check that the proper exception is raised if the directory has no
TXM files in it."""
EMPTY_DIR = 'temp-empty-dir'
os.mkdir(EMPTY_DIR)
try:
with self.assertRaisesRegex(exceptions.DataNotFoundError,
'/temp-empty-dir'):
import_aps8bm_xanes_dir(EMPTY_DIR,
hdf_filename="test-file.hdf",
quiet=True)
finally:
# Clean up by deleting any temporary files/directories
if os.path.exists('test-file.hdf'):
os.remove('test-file.hdf')
os.rmdir(EMPTY_DIR)
def test_imported_references(self):
import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf, quiet=True)
with h5py.File(self.hdf, mode='r') as f:
self.assertIn('references', f['fov03/imported'].keys())
def test_groupname_kwarg(self):
"""The groupname keyword argument needs some special attention."""
with self.assertRaisesRegex(exceptions.CreateGroupError, 'Invalid groupname'):
import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf,
quiet=True, groupname="Wawa")
# Now does it work with the {} inserted
import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf,
quiet=True, groupname="Wawa{}")
def test_imported_hdf(self):
import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf, quiet=True)
# Check that the file was created
self.assertTrue(os.path.exists(self.hdf))
with h5py.File(self.hdf, mode='r') as f:
group = f['fov03/imported']
parent = f['fov03']
# Check metadata about beamline
self.assertEqual(parent.attrs['technique'], 'Full-field TXM')
self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
self.assertEqual(parent.attrs['beamline'], "APS 8-BM-B")
self.assertEqual(parent.attrs['original_directory'], APS_DIR)
# Check h5 data structure
keys = list(group.keys())
self.assertIn('intensities', keys)
self.assertEqual(group['intensities'].shape, (2, 2, 1024, 1024))
self.assertIn('references', keys)
self.assertIn('optical_depths', keys)
self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
self.assertEqual(group['pixel_sizes'].shape, (2,2))
self.assertTrue(np.any(group['pixel_sizes'].value > 0))
expected_Es = np.array([[8249.9365234375, 8353.0322265625],
[8249.9365234375, 8353.0322265625]])
self.assertTrue(np.array_equal(group['energies'].value, expected_Es))
self.assertIn('timestamps', keys)
expected_timestamp = np.array([
[[b'2016-07-02 21:31:36', b'2016-07-02 21:32:26'],
[b'2016-07-02 22:50:35', b'2016-07-02 22:51:25']],
[[b'2016-07-03 03:19:23', b'2016-07-03 03:19:58'],
[b'2016-07-03 04:21:21', b'2016-07-03 04:21:56']],
], dtype="S32")
np.testing.assert_equal(group['timestamps'].value,
expected_timestamp)
self.assertIn('filenames', keys)
self.assertIn('original_positions', keys)
# self.assertIn('relative_positions', keys)
# self.assertEqual(group['relative_positions'].shape, (2, 3))
def test_params_from_aps(self):
"""Check that the new naming scheme is decoded properly."""
ref_filename = "ref_xanesocv_8250_0eV.xrm"
result = decode_aps_params(ref_filename)
expected = {
'timestep_name': 'ocv',
'position_name': 'ref',
'is_background': True,
'energy': 8250.0,
}
self.assertEqual(result, expected)
# An example reference filename from 2015-11-11 beamtime
ref_filename = 'ncm111-cell1-chargeC15/operando-xanes00/20151111_UIC_XANES00_bkg_8313.xrm'
result = decode_aps_params(ref_filename)
self.assertTrue(result['is_background'])
self.assertEqual(result['energy'], 8313.0)
self.assertEqual(result['position_name'], 'bkg')
self.assertEqual(result['timestep_name'], '00')
# An example reference filename from 2015-11-11 beamtime
sam_filename = 'ncm111-cell1-chargeC15/operando-xanes05/20151111_UIC_XANES05_sam02_8381.xrm'
result = decode_aps_params(sam_filename)
self.assertFalse(result['is_background'])
self.assertEqual(result['energy'], 8381.0)
self.assertEqual(result['position_name'], 'sam02')
self.assertEqual(result['timestep_name'], '05')
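        # Illustrative only (not the real decode_aps_params implementation):
        # the expectations above are consistent with filenames laid out roughly
        # as <position>_xanes<timestep>_<energy-int>_<energy-frac>eV.xrm for the
        # newer scheme, and ..._XANES<timestep>_<position>_<energy>.xrm for the
        # older 2015-11-11 scheme, with a 'ref'/'bkg' position marking
        # background frames.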
def test_file_metadata(self):
filenames = [os.path.join(APS_DIR, 'fov03_xanessoc01_8353_0eV.xrm')]
df = read_metadata(filenames=filenames, flavor='aps', quiet=True)
self.assertIsInstance(df, pd.DataFrame)
row = df.iloc[0]
self.assertIn('shape', row.keys())
self.assertIn('timestep_name', row.keys())
# Check the correct start time
chicago_tz = pytz.timezone('US/Central')
realtime = chicago_tz.localize(dt.datetime(2016, 7, 2, 23, 21, 21))
realtime = realtime.astimezone(pytz.utc).replace(tzinfo=None)
        # The imported start time should come back as a (naive UTC) pandas Timestamp
self.assertIsInstance(row['starttime'], pd.Timestamp)
self.assertEqual(row['starttime'], realtime)
class SSRLImportTest(TestCase):
"""Check that the program can import a collection of SSRL frames from
a directory."""
def setUp(self):
self.hdf = os.path.join(SSRL_DIR, 'testdata.h5')
if os.path.exists(self.hdf):
os.remove(self.hdf)
def tearDown(self):
if os.path.exists(self.hdf):
os.remove(self.hdf)
def test_minimum_shape(self):
shape_list = [(1024, 512), (1024, 1024), (2048, 2048)]
min_shape = minimum_shape(shape_list)
self.assertEqual(min_shape, (1024, 512))
# Check with incompatible shape dimensions
shape_list = [(1024, 1024), (1024, 1024), (2048, 2048, 2048)]
with self.assertRaises(exceptions.ShapeMismatchError):
minimum_shape(shape_list)
# Check that non-power-of-two shapes raise an exception
shape_list = [(5, 1024), (1024, 1024), (2048, 2048)]
with self.assertRaises(exceptions.ShapeMismatchError):
minimum_shape(shape_list)
        # Check that named tuples also work
        shape_list = [utilities.shape(1024, 1024), utilities.shape(1024, 1024)]
        min_shape = minimum_shape(shape_list)
        self.assertEqual(min_shape, (1024, 1024))
def test_rebin_image(self):
my_list = [1, 2, 2, 3, 3, 3]
# Test a symmetrical reshape
img = np.ones((64, 64))
new_img = rebin_image(img, (32, 32))
self.assertEqual(new_img.shape, (32, 32))
# Test an asymmetrical reshape
img = np.ones((64, 64))
new_img = rebin_image(img, (32, 16))
self.assertEqual(new_img.shape, (32, 16))
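        # For reference, this kind of downsampling can be written in plain
        # NumPy as a reshape-and-aggregate (illustrative only; the actual
        # rebin_image implementation may differ):
        #   img.reshape(32, 2, 16, 4).mean(axis=(1, 3))   # (64, 64) -> (32, 16)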
def test_imported_hdf(self):
        with warnings.catch_warnings():
# warnings.simplefilter('ignore', RuntimeWarning, 104)
warnings.filterwarnings('ignore',
message='Ignoring invalid file .*',
category=RuntimeWarning)
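            # The test directory appears to contain a deliberately corrupt .xrm
            # file (see test_bad_file below), so the "Ignoring invalid file"
            # warning is silenced here.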
import_ssrl_xanes_dir(SSRL_DIR, hdf_filename=self.hdf, quiet=True)
# Check that the file was created
self.assertTrue(os.path.exists(self.hdf))
with h5py.File(self.hdf, mode='r') as f:
group = f['ssrl-test-data/imported']
parent = f['ssrl-test-data']
# Check metadata about beamline
self.assertEqual(parent.attrs['technique'], 'Full-field TXM')
self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
self.assertEqual(parent.attrs['beamline'], "SSRL 6-2c")
self.assertEqual(parent.attrs['original_directory'], SSRL_DIR)
# Check imported data structure
keys = list(group.keys())
self.assertIn('intensities', keys)
self.assertEqual(group['intensities'].attrs['context'], 'frameset')
self.assertEqual(group['intensities'].shape, (1, 2, 1024, 1024))
self.assertIn('references', keys)
self.assertEqual(group['references'].attrs['context'], 'frameset')
self.assertIn('optical_depths', keys)
self.assertEqual(group['optical_depths'].attrs['context'], 'frameset')
self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
self.assertEqual(group['pixel_sizes'].attrs['context'], 'metadata')
isEqual = np.array_equal(group['energies'].value,
np.array([[8324., 8354.]]))
self.assertTrue(isEqual, msg=group['energies'].value)
self.assertEqual(group['energies'].attrs['context'], 'metadata')
self.assertIn('timestamps', keys)
self.assertEqual(group['timestamps'].attrs['context'], 'metadata')
self.assertIn('filenames', keys)
self.assertEqual(group['filenames'].attrs['context'], 'metadata')
self.assertIn('original_positions', keys)
self.assertEqual(group['original_positions'].attrs['context'], 'metadata')
self.assertIn('relative_positions', keys)
self.assertEqual(group['relative_positions'].attrs['context'], 'metadata')
self.assertIn('timestep_names', keys)
            self.assertEqual(group['timestep_names'].attrs['context'], 'metadata')
self.assertEqual(group['timestep_names'][0], "rep01")
def test_params_from_ssrl(self):
# First a reference frame
ref_filename = "rep01_000001_ref_201511202114_NCA_INSITU_OCV_FOV01_Ni_08250.0_eV_001of010.xrm"
result = decode_ssrl_params(ref_filename)
expected = {
'timestep_name': 'rep01',
'position_name': 'NCA_INSITU_OCV_FOV01_Ni',
'is_background': True,
'energy': 8250.0,
}
self.assertEqual(result, expected)
# Now a sample field of view
sample_filename = "rep01_201511202114_NCA_INSITU_OCV_FOV01_Ni_08250.0_eV_001of010.xrm"
result = decode_ssrl_params(sample_filename)
expected = {
'timestep_name': 'rep01',
'position_name': 'NCA_INSITU_OCV_FOV01_Ni',
'is_background': False,
'energy': 8250.0,
}
self.assertEqual(result, expected)
# This one was a problem, from 2017-04-05
sample_filename = (
"NCA_Pg71-5/Pg71-5_NCA_charge2_XANES_170405_1515/"
"rep01_Pg71-5_NCA_charge2_08250.0_eV_001of005.xrm")
result = decode_ssrl_params(sample_filename)
expected = {
'timestep_name': 'rep01',
'position_name': 'Pg71-5_NCA_charge2',
'is_background': False,
'energy': 8250.0,
}
self.assertEqual(result, expected)
# This reference was also a problem
ref_filename = 'rep01_000001_ref_Pg71-5_NCA_charge2_08250.0_eV_001of010.xrm'
result = decode_ssrl_params(ref_filename)
expected = {
'timestep_name': 'rep01',
'position_name': 'Pg71-5_NCA_charge2',
'is_background': True,
'energy': 8250.0,
}
self.assertEqual(result, expected)
# Another bad reference file
ref_filename = 'rep02_000182_ref_201604061951_Pg71-8_NCA_charge1_08400.0_eV_002of010.xrm'
result = decode_ssrl_params(ref_filename)
expected = {
'timestep_name': 'rep02',
'position_name': 'Pg71-8_NCA_charge1',
'is_background': True,
'energy': 8400.0,
}
self.assertEqual(result, expected)
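        # Illustrative only: the SSRL expectations above are consistent with a
        # filename layout of roughly
        #   <timestep>[_<sequence>_ref][_<datetime>]_<position>_<energy>_eV_<NNNofMMM>.xrm
        # where the optional '_ref' token marks background frames and the
        # position name is whatever sits between the optional timestamp and the
        # energy field.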
def test_magnification_correction(self):
# Prepare some fake data
img1 = [[1,1,1,1,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,1]]
img2 = [[0,0,0,0,0],
[0,1,1,1,0],
[0,1,0,1,0],
[0,1,1,1,0],
[0,0,0,0,0]]
        imgs = np.array([[img1, img2], [img1, img2]], dtype=float)
pixel_sizes = np.array([[1, 2], [1, 2]])
scales, translations = magnification_correction(imgs, pixel_sizes)
        # Check that a result of the right shape is returned
self.assertEqual(scales.shape, (2, 2, 2))
np.testing.assert_equal(scales[..., 0], scales[..., 1])
# Check that the first result is not corrected
np.testing.assert_equal(scales[0, 0], (1., 1.))
np.testing.assert_equal(translations[0, 0], (0, 0))
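        # The expected values below are consistent with a scale of
        # min(pixel_sizes) / pixel_size (1/2 = 0.5 for the second frame) and a
        # translation of center * (1 - scale) = 2 * 0.5 = 1 for a 5x5 frame
        # centered on pixel (2, 2) -- inferred from the test data, not from the
        # magnification_correction source.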
        # Check the values for translation and scale for the changed image
np.testing.assert_equal(scales[0, 1], (0.5, 0.5))
np.testing.assert_equal(translations[0,1], (1., 1.))
def test_bad_file(self):
# One specific file is not saved properly
filenames = [
# No image data nor timestamp
'rep02_000072_ref_20161456_ssrl-test-data_08348.0_eV_002of010.xrm',
# Valid file
'rep01_000351_ref_20161456_ssrl-test-data_08354.0_eV_001of010.xrm',
# Malformed image data
# 'rep02_000182_ref_20161456_ssrl-test-data_08400.0_eV_002of010.xrm',
]
filenames = [os.path.join(SSRL_DIR, f) for f in filenames]
# Check that the importer warns the user of the bad file
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
result = read_metadata(filenames, flavor='ssrl', quiet=True)
self.assertTrue(len(ws) > 0)
self.assertTrue(any([w.category == RuntimeWarning for w in ws]))
self.assertTrue(any(['Ignoring invalid file' in str(w.message) for w in ws]))
        # Check that the bad entry was excluded from the processed list
self.assertEqual(len(result), 1)
class SxstmFileTestCase(unittest.TestCase):
"""Tests for soft x-ray tunneling microscope data from APS 4-ID-C."""
def test_header(self):
filename = os.path.join(SXSTM_DIR, 'XGSS_UIC_JC_475v_60c_001_001_001.3ds')
sxstm_data = SxstmDataFile(filename=filename)
header = sxstm_data.header_lines()
self.assertEqual(len(header), 33)
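        # Parsing the data block into a dataframe should also succeed; its
        # contents are not inspected further here.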
data = sxstm_data.dataframe()
sxstm_data.close()
class SxstmImportTestCase(unittest.TestCase):
"""Tests for importing a set of X-ray tunneleing microscopy data from
APS 4-ID-C.
"""
hdf_filename = os.path.join(SXSTM_DIR, 'sxstm_imported.h5')
parent_groupname = 'sxstm-test-data'
def tearDown(self):
if os.path.exists(self.hdf_filename):
os.remove(self.hdf_filename)
def test_hdf_file(self):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='X and Y pixel sizes')
import_aps4idc_sxstm_files(filenames=os.path.join(TEST_DIR, 'sxstm-data-4idc'),
hdf_filename=self.hdf_filename,
hdf_groupname=self.parent_groupname,
shape=(2, 2),
energies=[8324., 8354.])
# Check that the file exists with the data group
self.assertTrue(os.path.exists(self.hdf_filename))
with h5py.File(self.hdf_filename, mode='r') as f:
# Check that the group structure is correct
self.assertIn(self.parent_groupname, list(f.keys()))
parent = f[self.parent_groupname]
self.assertIn('imported', list(parent.keys()),
"Importer didn't create '/%s/imported'" % self.parent_groupname)
# Check metadata about beamline
self.assertEqual(parent.attrs['technique'],
'Synchrotron X-ray Scanning Tunneling Microscopy')
self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
self.assertEqual(parent.attrs['beamline'], "APS 4-ID-C")
self.assertEqual(parent.attrs['latest_data_name'], 'imported')
full_path = os.path.abspath(SXSTM_DIR)
self.assertEqual(parent.attrs['original_directory'], full_path)
# Check that the datasets are created
group = parent['imported']
keys = list(group.keys())
columns = ['bias_calc', 'current', 'LIA_tip_ch1',
'LIA_tip_ch2', 'LIA_sample', 'LIA_shielding',
'LIA_topo', 'shielding', 'flux', 'bias',
'height']
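            # Each channel appears to be stored as (timestep, energy, row, column),
            # i.e. (1, 2, 2, 2) for this two-energy, 2x2-pixel test scan.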
for col in columns:
self.assertIn(col, list(group.keys()),
"Importer didn't create '/%s/imported/%s'"
"" % (self.parent_groupname, col))
self.assertEqual(group[col].attrs['context'], 'frameset')
self.assertEqual(group[col].dtype, 'float32')
self.assertEqual(group[col].shape, (1, 2, 2, 2))
self.assertTrue(np.any(group[col]))
self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
self.assertEqual(group['pixel_sizes'].attrs['context'], 'metadata')
isEqual = np.array_equal(group['energies'].value,
np.array([[8324., 8354.]]))
self.assertTrue(isEqual, msg=group['energies'].value)
self.assertEqual(group['energies'].attrs['context'], 'metadata')
self.assertIn('filenames', keys)
self.assertEqual(group['filenames'].attrs['context'], 'metadata')
self.assertIn('timestep_names', keys)
self.assertEqual(group['timestep_names'].attrs['context'], 'metadata')
self.assertEqual(group['timestep_names'][0], b"ex-situ")
# self.assertIn('timestamps', keys)
# self.assertEqual(group['timestamps'].attrs['context'], 'metadata')
# self.assertIn('original_positions', keys)
# self.assertEqual(group['original_positions'].attrs['context'], 'metadata')
# self.assertIn('relative_positions', keys)
# self.assertEqual(group['relative_positions'].attrs['context'], 'metadata')
def test_file_list(self):
"""See if a file list can be passed instead of a directory name."""
filelist = [os.path.join(SXSTM_DIR, f) for f in os.listdir(SXSTM_DIR)]
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='X and Y pixel sizes')
import_aps4idc_sxstm_files(filenames=filelist,
hdf_filename=self.hdf_filename,
hdf_groupname=self.parent_groupname,
shape=(2, 2),
energies=[8324., 8354.])
| gpl-3.0 |
jmschrei/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of the three classes.
Points are labeled as follows, where Y means the class is present:
=====  =====  =====  ======
  1      2      3    Color
=====  =====  =====  ======
  Y      N      N    Red
  N      Y      N    Blue
  N      N      Y    Yellow
  Y      Y      N    Purple
  Y      N      Y    Orange
  N      Y      Y    Green
  Y      Y      Y    Brown
=====  =====  =====  ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
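    # Each row of Y is a binary indicator over the three classes; weighting it
    # by [1, 2, 4] packs those bits into an index 1-7 that picks the matching
    # COLORS entry (the '!' placeholder at index 0 is never hit because
    # allow_unlabeled=False).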
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |